/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle and RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in the
 * latency required to enter and leave RC6, and in the voltage consumed by
 * the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
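
/*
 * Illustrative sketch (not part of the driver): the flags above form a
 * bitmask, so a policy that permits RC6 and deep RC6 but not the deepest
 * state would be expressed as a combined mask. The variable name here is
 * hypothetical:
 *
 *	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 *	(rc6_mask & INTEL_RC6pp_ENABLE) evaluates to 0 for this mask.
 */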

static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaEnableLbsSlaRetryTimerDecrement:skl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
}

static void skl_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	if (INTEL_REVID(dev) <= SKL_REVID_B0) {
		/*
		 * WaDisableSDEUnitClockGating:skl
		 * WaSetGAPSunitClckGateDisable:skl
		 */
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

		/* WaDisableVFUnitClockGating:skl */
		I915_WRITE(GEN6_UCGCTL2, I915_READ(GEN6_UCGCTL2) |
			   GEN6_VFUNIT_CLOCK_GATE_DISABLE);
	}

	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
		/* WaDisableHDCInvalidation:skl */
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   BDW_DISABLE_HDC_INVALIDATION);

		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
	}

	if (INTEL_REVID(dev) <= SKL_REVID_E0)
		/* WaDisableLSQCROPERFforOCL:skl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);
}

static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/*
	 * FIXME:
	 * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only.
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/* FIXME: apply on A0 only */
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
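
/*
 * Illustrative expansion (not part of the driver): token pasting turns the
 * plane name into the matching register field, e.g.
 *
 *	FW_WM(wm, SR)
 *
 * expands to
 *
 *	(((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK)
 *
 * i.e. the value is shifted into the SR field and clipped to its mask.
 */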

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		if (IS_CHERRYVIEW(dev))
			chv_set_memory_pm5(dev_priv, enable);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}


/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
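
/*
 * Illustrative example (not part of the driver): the FIFO split points are
 * 9-bit values whose low 8 bits live in DSPARB/DSPARB3 and whose 9th bit
 * lives in DSPARB2. With hypothetical register values dsparb = 0xc0 and
 * dsparb2 = 0x1, pipe A's sprite0 start would be:
 *
 *	VLV_FIFO_START(0xc0, 0x1, 0, 0)
 *	  = ((0xc0 >> 0) & 0xff) | (((0x1 >> 0) & 0x1) << 8)
 *	  = 0xc0 | 0x100 = 448
 */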

static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
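
/*
 * Illustrative example (not part of the driver): DSPARB holds the FIFO
 * split points. With a hypothetical dsparb = 0x6030, and assuming
 * DSPARB_CSTART_SHIFT is 8 purely for this arithmetic:
 *
 *	plane A: size = 0x6030 & 0x7f = 0x30 = 48 entries
 *	plane B: size = ((0x6030 >> 8) & 0x7f) - 48 = 96 - 48 = 48 entries
 */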

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO available to this plane
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8, which is the burst size, since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
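
/*
 * Worked example (illustrative only, hypothetical numbers): for a 1080p
 * pixel clock of 148500 kHz, 4 bytes per pixel, the 5000 ns pessimal
 * latency and a 64-byte cacheline:
 *
 *	entries_required = (148500 / 1000) * 4 * 5000 / 1000 = 2960 bytes
 *	                 = DIV_ROUND_UP(2960, 64) = 47 cachelines
 *
 * With an assumed fifo_size of 96 and guard_size of 2, the watermark would
 * be 96 - (47 + 2) = 47 FIFO entries.
 */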

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);

	dev_priv->wm.vlv = *wm;
}

#undef FW_WM_VLV

static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
					 struct drm_plane *plane)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int entries, prec_mult, drain_latency, pixel_size;
	int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
	const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;

	/*
	 * FIXME the plane might have an fb
	 * but be invisible (eg. due to clipping)
	 */
	if (!intel_crtc->active || !plane->state->fb)
		return 0;

	if (WARN(clock == 0, "Pixel clock is zero!\n"))
		return 0;

	pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);

	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
		return 0;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;

	prec_mult = high_precision;
	drain_latency = 64 * prec_mult * 4 / entries;

	if (drain_latency > DRAIN_LATENCY_MASK) {
		prec_mult /= 2;
		drain_latency = 64 * prec_mult * 4 / entries;
	}

	if (drain_latency > DRAIN_LATENCY_MASK)
		drain_latency = DRAIN_LATENCY_MASK;

	return drain_latency | (prec_mult == high_precision ?
				DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
}
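
/*
 * Worked example (illustrative only, hypothetical numbers): at a 148500 kHz
 * pixel clock with 4 bytes per pixel on VLV (high_precision = 64):
 *
 *	entries = DIV_ROUND_UP(148500, 1000) * 4 = 149 * 4 = 596
 *	drain_latency = 64 * 64 * 4 / 596 = 27
 *
 * If that result exceeded DRAIN_LATENCY_MASK, the precision multiplier
 * would be halved and the latency recomputed (and reported with
 * DDL_PRECISION_LOW instead of DDL_PRECISION_HIGH).
 */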

static int vlv_compute_wm(struct intel_crtc *crtc,
			  struct intel_plane *plane,
			  int fifo_size)
{
	int clock, entries, pixel_size;

	/*
	 * FIXME the plane might have an fb
	 * but be invisible (eg. due to clipping)
	 */
	if (!crtc->active || !plane->base.state->fb)
		return 0;

	pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;

	/*
	 * Set up the watermark such that we don't start issuing memory
	 * requests until we are within PND's max deadline value (256us).
	 * The idea is to be idle as long as possible while still taking
	 * advantage of PND's deadline scheduling. The limit of 8
	 * cachelines (used when the FIFO will anyway drain in less time
	 * than 256us) should match what would be done if trickle
	 * feed were enabled.
	 */
	return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
}
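
/*
 * Worked example (illustrative only, hypothetical numbers): at a 25175 kHz
 * pixel clock with 2 bytes per pixel:
 *
 *	entries = DIV_ROUND_UP(25175, 1000) * 2 = 26 * 2 = 52 bytes/us
 *	256us worth = DIV_ROUND_UP(256 * 52, 64) = 208 cachelines
 *
 * With an assumed fifo_size of 512, the watermark would be 512 - 208 = 304,
 * i.e. memory requests start once the FIFO is within 256us of draining.
 * At high pixel clocks the clamp caps the term at fifo_size - 8, so the
 * watermark bottoms out at 8 cachelines.
 */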

static bool vlv_compute_sr_wm(struct drm_device *dev,
			      struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	enum pipe pipe = INVALID_PIPE;
	int num_planes = 0;
	int fifo_size = 0;
	struct intel_plane *plane;

	wm->sr.cursor = wm->sr.plane = 0;

	crtc = single_enabled_crtc(dev);
	/* maxfifo not supported on pipe C */
	if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
		pipe = to_intel_crtc(crtc)->pipe;
		num_planes = !!wm->pipe[pipe].primary +
			!!wm->pipe[pipe].sprite[0] +
			!!wm->pipe[pipe].sprite[1];
		fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
	}

	if (fifo_size == 0 || num_planes > 1)
		return false;

	wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
				       to_intel_plane(crtc->cursor), 0x3f);

	list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (plane->pipe != pipe)
			continue;

		wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
					      plane, fifo_size);
		if (wm->sr.plane != 0)
			break;
	}

	return true;
}

static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	bool cxsr_enabled;
	struct vlv_wm_values wm = dev_priv->wm.vlv;

	wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
	wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
					       to_intel_plane(crtc->primary),
					       vlv_get_fifo_size(dev, pipe, 0));

	wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
	wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
					      to_intel_plane(crtc->cursor),
					      0x3f);

	cxsr_enabled = vlv_compute_sr_wm(dev, &wm);

	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
		return;

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
		      wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.sr.plane, wm.sr.cursor);

	/*
	 * FIXME DDR DVFS introduces massive memory latencies which
	 * are not known to the system agent, so any deadline specified
	 * by the display may not be respected. To support DDR DVFS
	 * the watermark code needs to be rewritten to essentially
	 * bypass the deadline mechanism and rely solely on the
	 * watermarks. For now, disable DDR DVFS.
	 */
	if (IS_CHERRYVIEW(dev_priv))
		chv_set_memory_dvfs(dev_priv, false);

	if (!cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(intel_crtc, &wm);

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void valleyview_update_sprite_wm(struct drm_plane *plane,
					struct drm_crtc *crtc,
					uint32_t sprite_width,
					uint32_t sprite_height,
					int pixel_size,
					bool enabled, bool scaled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int sprite = to_intel_plane(plane)->plane;
	bool cxsr_enabled;
	struct vlv_wm_values wm = dev_priv->wm.vlv;

	if (enabled) {
		wm.ddl[pipe].sprite[sprite] =
			vlv_compute_drain_latency(crtc, plane);

		wm.pipe[pipe].sprite[sprite] =
			vlv_compute_wm(intel_crtc,
				       to_intel_plane(plane),
				       vlv_get_fifo_size(dev, pipe, sprite+1));
	} else {
		wm.ddl[pipe].sprite[sprite] = 0;
		wm.pipe[pipe].sprite[sprite] = 0;
	}

	cxsr_enabled = vlv_compute_sr_wm(dev, &wm);

	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
		return;

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
		      sprite_name(pipe, sprite),
		      wm.pipe[pipe].sprite[sprite],
		      wm.sr.plane, wm.sr.cursor);

	if (!cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(intel_crtc, &wm);

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM

static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config->pch_pfit.size;

		pipe_w = intel_crtc->config->pipe_src_w;
		pipe_h = intel_crtc->config->pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}
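
/*
 * Worked example (illustrative only, hypothetical numbers): pixel_rate =
 * 148500 kHz, 4 bytes per pixel, latency = 50 (i.e. 5us in 0.1us units),
 * pipe_htotal = 2200, horiz_pixels = 1920:
 *
 *	method1: DIV_ROUND_UP(148500 * 4 * 50, 64 * 10000) + 2 = 47 + 2 = 49
 *	method2: lines = (50 * 148500) / (2200 * 10000) = 0, so
 *	         DIV_ROUND_UP((0 + 1) * 1920 * 4, 64) + 2 = 120 + 2 = 122
 *
 * The plane/sprite helpers below take min(method1, method2) for LP levels,
 * which here would be 49.
 */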

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}

struct skl_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate; /* in KHz */
	struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
	struct intel_plane_wm_parameters cursor;
};

struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}

static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}

static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
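
/*
 * Worked example (illustrative only): on a single-pipe IVB/HSW config
 * (fifo_size = 768) at an LP level with sprites enabled and 5:6
 * partitioning:
 *
 *	sprite:  768 * 5 / 6 = 640 entries
 *	primary: 768 / 6     = 128 entries
 *
 * whereas with the default 1:1 split each side would get 768 / 2 = 384.
 */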
1701
1702 /* Calculate the maximum cursor plane watermark */
1703 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
1704 int level,
1705 const struct intel_wm_config *config)
1706 {
1707 /* HSW LP1+ watermarks w/ multiple pipes */
1708 if (level > 0 && config->num_pipes_active > 1)
1709 return 64;
1710
1711 /* otherwise just report max that registers can hold */
1712 return ilk_cursor_wm_reg_max(dev, level);
1713 }
1714
1715 static void ilk_compute_wm_maximums(const struct drm_device *dev,
1716 int level,
1717 const struct intel_wm_config *config,
1718 enum intel_ddb_partitioning ddb_partitioning,
1719 struct ilk_wm_maximums *max)
1720 {
1721 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
1722 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
1723 max->cur = ilk_cursor_wm_max(dev, level, config);
1724 max->fbc = ilk_fbc_wm_reg_max(dev);
1725 }
1726
1727 static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
1728 int level,
1729 struct ilk_wm_maximums *max)
1730 {
1731 max->pri = ilk_plane_wm_reg_max(dev, level, false);
1732 max->spr = ilk_plane_wm_reg_max(dev, level, true);
1733 max->cur = ilk_cursor_wm_reg_max(dev, level);
1734 max->fbc = ilk_fbc_wm_reg_max(dev);
1735 }
1736
1737 static bool ilk_validate_wm_level(int level,
1738 const struct ilk_wm_maximums *max,
1739 struct intel_wm_level *result)
1740 {
1741 bool ret;
1742
1743 /* already determined to be invalid? */
1744 if (!result->enable)
1745 return false;
1746
1747 result->enable = result->pri_val <= max->pri &&
1748 result->spr_val <= max->spr &&
1749 result->cur_val <= max->cur;
1750
1751 ret = result->enable;
1752
1753 /*
1754 * HACK until we can pre-compute everything,
1755 * and thus fail gracefully if LP0 watermarks
1756 * are exceeded...
1757 */
1758 if (level == 0 && !result->enable) {
1759 if (result->pri_val > max->pri)
1760 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
1761 level, result->pri_val, max->pri);
1762 if (result->spr_val > max->spr)
1763 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
1764 level, result->spr_val, max->spr);
1765 if (result->cur_val > max->cur)
1766 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
1767 level, result->cur_val, max->cur);
1768
1769 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
1770 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
1771 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
1772 result->enable = true;
1773 }
1774
1775 return ret;
1776 }
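
/*
 * Note the asymmetry above: when an LP0 watermark is out of range the
 * values are clamped and the level is forcibly re-enabled so the
 * display keeps working, but false is still returned so callers can
 * tell the computed values were not actually valid.
 */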
1777
1778 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
1779 int level,
1780 const struct ilk_pipe_wm_parameters *p,
1781 struct intel_wm_level *result)
1782 {
1783 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
1784 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
1785 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
1786
1787 /* WM1+ latency values are stored in 0.5us units; scale to 0.1us units */
1788 if (level > 0) {
1789 pri_latency *= 5;
1790 spr_latency *= 5;
1791 cur_latency *= 5;
1792 }
1793
1794 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
1795 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
1796 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
1797 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
1798 result->enable = true;
1799 }
1800
1801 static uint32_t
1802 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
1803 {
1804 struct drm_i915_private *dev_priv = dev->dev_private;
1805 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1806 struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
1807 u32 linetime, ips_linetime;
1808
1809 if (!intel_crtc->active)
1810 return 0;
1811
1812 /* The WMs are computed based on how long it takes to fill a single
1813 * row at the given clock rate, multiplied by 8.
1814 */
1815 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
1816 mode->crtc_clock);
1817 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
1818 dev_priv->display.get_display_clock_speed(dev_priv->dev));
1819
1820 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
1821 PIPE_WM_LINETIME_TIME(linetime);
1822 }
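
/*
 * A sanity check of the formula above with hypothetical 1080p timings
 * (crtc_htotal = 2200, crtc_clock = 148500 kHz): one line takes
 * 2200 pixels / 148.5 MHz ~= 14.81 us, so linetime becomes
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, i.e. the line
 * time expressed in 0.125 us units.
 */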
1823
1824 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
1825 {
1826 struct drm_i915_private *dev_priv = dev->dev_private;
1827
1828 if (IS_GEN9(dev)) {
1829 uint32_t val;
1830 int ret, i;
1831 int level, max_level = ilk_wm_max_level(dev);
1832
1833 /* read the first set of memory latencies[0:3] */
1834 val = 0; /* data0 to be programmed to 0 for first set */
1835 mutex_lock(&dev_priv->rps.hw_lock);
1836 ret = sandybridge_pcode_read(dev_priv,
1837 GEN9_PCODE_READ_MEM_LATENCY,
1838 &val);
1839 mutex_unlock(&dev_priv->rps.hw_lock);
1840
1841 if (ret) {
1842 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
1843 return;
1844 }
1845
1846 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
1847 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
1848 GEN9_MEM_LATENCY_LEVEL_MASK;
1849 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
1850 GEN9_MEM_LATENCY_LEVEL_MASK;
1851 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
1852 GEN9_MEM_LATENCY_LEVEL_MASK;
1853
1854 /* read the second set of memory latencies[4:7] */
1855 val = 1; /* data0 to be programmed to 1 for second set */
1856 mutex_lock(&dev_priv->rps.hw_lock);
1857 ret = sandybridge_pcode_read(dev_priv,
1858 GEN9_PCODE_READ_MEM_LATENCY,
1859 &val);
1860 mutex_unlock(&dev_priv->rps.hw_lock);
1861 if (ret) {
1862 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
1863 return;
1864 }
1865
1866 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
1867 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
1868 GEN9_MEM_LATENCY_LEVEL_MASK;
1869 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
1870 GEN9_MEM_LATENCY_LEVEL_MASK;
1871 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
1872 GEN9_MEM_LATENCY_LEVEL_MASK;
1873
1874 /*
1875 * WaWmMemoryReadLatency:skl
1876 *
1877 * punit doesn't take into account the read latency so we need
1878 * to add 2us to the various latency levels we retrieve from
1879 * the punit.
1880 * - WM0 is a bit special in that it's the only level that
1881 * can't be disabled if we want to have the display working, so
1882 * we always add 2us there.
1883 * - For levels >=1, the punit returns 0us latency when they are
1884 * disabled, so we respect that and don't add 2us then.
1885 *
1886 * Additionally, if a level n (n > 1) has a 0us latency, all
1887 * levels m (m >= n) need to be disabled. We make sure to
1888 * sanitize the values out of the punit to satisfy this
1889 * requirement.
1890 */
1891 wm[0] += 2;
1892 for (level = 1; level <= max_level; level++) {
1893 if (wm[level] == 0) {
1894 for (i = level + 1; i <= max_level; i++)
1895 wm[i] = 0;
1896 break;
1897 }
1898
1899 wm[level] += 2;
1900 }
1901 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1902 uint64_t sskpd = I915_READ64(MCH_SSKPD);
1903
1904 wm[0] = (sskpd >> 56) & 0xFF;
1905 if (wm[0] == 0)
1906 wm[0] = sskpd & 0xF;
1907 wm[1] = (sskpd >> 4) & 0xFF;
1908 wm[2] = (sskpd >> 12) & 0xFF;
1909 wm[3] = (sskpd >> 20) & 0x1FF;
1910 wm[4] = (sskpd >> 32) & 0x1FF;
1911 } else if (INTEL_INFO(dev)->gen >= 6) {
1912 uint32_t sskpd = I915_READ(MCH_SSKPD);
1913
1914 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
1915 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
1916 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
1917 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
1918 } else if (INTEL_INFO(dev)->gen >= 5) {
1919 uint32_t mltr = I915_READ(MLTR_ILK);
1920
1921 /* ILK primary LP0 latency is 700 ns */
1922 wm[0] = 7;
1923 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
1924 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
1925 }
1926 }
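
/*
 * To illustrate the gen9 sanitization above with hypothetical punit
 * values: raw latencies {4, 10, 21, 34, 4, 6, 0, 36} become
 * {6, 12, 23, 36, 6, 8, 0, 0} -- each non-zero level gains 2us, and
 * level 7 is zeroed because level 6 reads back as disabled (0us).
 */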
1927
1928 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
1929 {
1930 /* ILK sprite LP0 latency is 1300 ns */
1931 if (INTEL_INFO(dev)->gen == 5)
1932 wm[0] = 13;
1933 }
1934
1935 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
1936 {
1937 /* ILK cursor LP0 latency is 1300 ns */
1938 if (INTEL_INFO(dev)->gen == 5)
1939 wm[0] = 13;
1940
1941 /* WaDoubleCursorLP3Latency:ivb */
1942 if (IS_IVYBRIDGE(dev))
1943 wm[3] *= 2;
1944 }
1945
1946 int ilk_wm_max_level(const struct drm_device *dev)
1947 {
1948 /* how many WM levels are we expecting */
1949 if (IS_GEN9(dev))
1950 return 7;
1951 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1952 return 4;
1953 else if (INTEL_INFO(dev)->gen >= 6)
1954 return 3;
1955 else
1956 return 2;
1957 }
1958
1959 static void intel_print_wm_latency(struct drm_device *dev,
1960 const char *name,
1961 const uint16_t wm[8])
1962 {
1963 int level, max_level = ilk_wm_max_level(dev);
1964
1965 for (level = 0; level <= max_level; level++) {
1966 unsigned int latency = wm[level];
1967
1968 if (latency == 0) {
1969 DRM_ERROR("%s WM%d latency not provided\n",
1970 name, level);
1971 continue;
1972 }
1973
1974 /*
1975 * - latencies are in us on gen9.
1976 * - before then, WM1+ latency values are in 0.5us units
1977 */
1978 if (IS_GEN9(dev))
1979 latency *= 10;
1980 else if (level > 0)
1981 latency *= 5;
1982
1983 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
1984 name, level, wm[level],
1985 latency / 10, latency % 10);
1986 }
1987 }
1988
1989 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
1990 uint16_t wm[5], uint16_t min)
1991 {
1992 int level, max_level = ilk_wm_max_level(dev_priv->dev);
1993
1994 if (wm[0] >= min)
1995 return false;
1996
1997 wm[0] = max(wm[0], min);
1998 for (level = 1; level <= max_level; level++)
1999 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2000
2001 return true;
2002 }
2003
2004 static void snb_wm_latency_quirk(struct drm_device *dev)
2005 {
2006 struct drm_i915_private *dev_priv = dev->dev_private;
2007 bool changed;
2008
2009 /*
2010 * The BIOS provided WM memory latency values are often
2011 * inadequate for high resolution displays. Adjust them.
2012 */
2013 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2014 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2015 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2016
2017 if (!changed)
2018 return;
2019
2020 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2021 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2022 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2023 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2024 }
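
/*
 * In other words, with min = 12 the quirk bumps WM0 to at least
 * 1.2us (12 in 0.1us units) and, because WM1+ latencies are kept in
 * 0.5us units, bumps those to at least DIV_ROUND_UP(12, 5) = 3,
 * i.e. 1.5us.
 */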
2025
2026 static void ilk_setup_wm_latency(struct drm_device *dev)
2027 {
2028 struct drm_i915_private *dev_priv = dev->dev_private;
2029
2030 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2031
2032 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2033 sizeof(dev_priv->wm.pri_latency));
2034 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2035 sizeof(dev_priv->wm.pri_latency));
2036
2037 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2038 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2039
2040 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2041 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2042 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2043
2044 if (IS_GEN6(dev))
2045 snb_wm_latency_quirk(dev);
2046 }
2047
2048 static void skl_setup_wm_latency(struct drm_device *dev)
2049 {
2050 struct drm_i915_private *dev_priv = dev->dev_private;
2051
2052 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2053 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2054 }
2055
2056 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2057 struct ilk_pipe_wm_parameters *p)
2058 {
2059 struct drm_device *dev = crtc->dev;
2060 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2061 enum pipe pipe = intel_crtc->pipe;
2062 struct drm_plane *plane;
2063
2064 if (!intel_crtc->active)
2065 return;
2066
2067 p->active = true;
2068 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
2069 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2070
2071 if (crtc->primary->state->fb) {
2072 p->pri.enabled = true;
2073 p->pri.bytes_per_pixel =
2074 crtc->primary->state->fb->bits_per_pixel / 8;
2075 } else {
2076 p->pri.enabled = false;
2077 p->pri.bytes_per_pixel = 0;
2078 }
2079
2080 if (crtc->cursor->state->fb) {
2081 p->cur.enabled = true;
2082 p->cur.bytes_per_pixel = 4;
2083 } else {
2084 p->cur.enabled = false;
2085 p->cur.bytes_per_pixel = 0;
2086 }
2087 p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
2088 p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
2089
2090 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2091 struct intel_plane *intel_plane = to_intel_plane(plane);
2092
2093 if (intel_plane->pipe == pipe) {
2094 p->spr = intel_plane->wm;
2095 break;
2096 }
2097 }
2098 }
2099
2100 static void ilk_compute_wm_config(struct drm_device *dev,
2101 struct intel_wm_config *config)
2102 {
2103 struct intel_crtc *intel_crtc;
2104
2105 /* Compute the currently _active_ config */
2106 for_each_intel_crtc(dev, intel_crtc) {
2107 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2108
2109 if (!wm->pipe_enabled)
2110 continue;
2111
2112 config->sprites_enabled |= wm->sprites_enabled;
2113 config->sprites_scaled |= wm->sprites_scaled;
2114 config->num_pipes_active++;
2115 }
2116 }
2117
2118 /* Compute new watermarks for the pipe */
2119 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2120 const struct ilk_pipe_wm_parameters *params,
2121 struct intel_pipe_wm *pipe_wm)
2122 {
2123 struct drm_device *dev = crtc->dev;
2124 const struct drm_i915_private *dev_priv = dev->dev_private;
2125 int level, max_level = ilk_wm_max_level(dev);
2126 /* LP0 watermark maximums depend on this pipe alone */
2127 struct intel_wm_config config = {
2128 .num_pipes_active = 1,
2129 .sprites_enabled = params->spr.enabled,
2130 .sprites_scaled = params->spr.scaled,
2131 };
2132 struct ilk_wm_maximums max;
2133
2134 pipe_wm->pipe_enabled = params->active;
2135 pipe_wm->sprites_enabled = params->spr.enabled;
2136 pipe_wm->sprites_scaled = params->spr.scaled;
2137
2138 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2139 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2140 max_level = 1;
2141
2142 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2143 if (params->spr.scaled)
2144 max_level = 0;
2145
2146 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
2147
2148 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2149 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2150
2151 /* LP0 watermarks always use 1/2 DDB partitioning */
2152 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2153
2154 /* At least LP0 must be valid */
2155 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2156 return false;
2157
2158 ilk_compute_wm_reg_maximums(dev, 1, &max);
2159
2160 for (level = 1; level <= max_level; level++) {
2161 struct intel_wm_level wm = {};
2162
2163 ilk_compute_wm_level(dev_priv, level, params, &wm);
2164
2165 /*
2166 * Disable any watermark level that exceeds the
2167 * register maximums since such watermarks are
2168 * always invalid.
2169 */
2170 if (!ilk_validate_wm_level(level, &max, &wm))
2171 break;
2172
2173 pipe_wm->wm[level] = wm;
2174 }
2175
2176 return true;
2177 }
2178
2179 /*
2180 * Merge the watermarks from all active pipes for a specific level.
2181 */
2182 static void ilk_merge_wm_level(struct drm_device *dev,
2183 int level,
2184 struct intel_wm_level *ret_wm)
2185 {
2186 const struct intel_crtc *intel_crtc;
2187
2188 ret_wm->enable = true;
2189
2190 for_each_intel_crtc(dev, intel_crtc) {
2191 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2192 const struct intel_wm_level *wm = &active->wm[level];
2193
2194 if (!active->pipe_enabled)
2195 continue;
2196
2197 /*
2198 * The watermark values may have been used in the past,
2199 * so we must maintain them in the registers for some
2200 * time even if the level is now disabled.
2201 */
2202 if (!wm->enable)
2203 ret_wm->enable = false;
2204
2205 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2206 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2207 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2208 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2209 }
2210 }
2211
2212 /*
2213 * Merge all low power watermarks for all active pipes.
2214 */
2215 static void ilk_wm_merge(struct drm_device *dev,
2216 const struct intel_wm_config *config,
2217 const struct ilk_wm_maximums *max,
2218 struct intel_pipe_wm *merged)
2219 {
2220 int level, max_level = ilk_wm_max_level(dev);
2221 int last_enabled_level = max_level;
2222
2223 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2224 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2225 config->num_pipes_active > 1)
2226 return;
2227
2228 /* ILK: FBC WM must always be disabled */
2229 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2230
2231 /* merge each WM1+ level */
2232 for (level = 1; level <= max_level; level++) {
2233 struct intel_wm_level *wm = &merged->wm[level];
2234
2235 ilk_merge_wm_level(dev, level, wm);
2236
2237 if (level > last_enabled_level)
2238 wm->enable = false;
2239 else if (!ilk_validate_wm_level(level, max, wm))
2240 /* make sure all following levels get disabled */
2241 last_enabled_level = level - 1;
2242
2243 /*
2244 * The spec says it is preferred to disable
2245 * FBC WMs instead of disabling a WM level.
2246 */
2247 if (wm->fbc_val > max->fbc) {
2248 if (wm->enable)
2249 merged->fbc_wm_enabled = false;
2250 wm->fbc_val = 0;
2251 }
2252 }
2253
2254 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2255 /*
2256 * FIXME this is racy. FBC might get enabled later.
2257 * What we should check here is whether FBC can be
2258 * enabled sometime later.
2259 */
2260 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2261 for (level = 2; level <= max_level; level++) {
2262 struct intel_wm_level *wm = &merged->wm[level];
2263
2264 wm->enable = false;
2265 }
2266 }
2267 }
2268
2269 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2270 {
2271 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2272 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2273 }
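
/*
 * For example, on a platform with five levels where wm[4] is enabled,
 * this maps LP1 -> level 1, LP2 -> level 3 and LP3 -> level 4 (the
 * 1,3,4 case); otherwise LP1..LP3 map straight to levels 1..3.
 */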
2274
2275 /* The value we need to program into the WM_LPx latency field */
2276 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2277 {
2278 struct drm_i915_private *dev_priv = dev->dev_private;
2279
2280 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2281 return 2 * level;
2282 else
2283 return dev_priv->wm.pri_latency[level];
2284 }
2285
2286 static void ilk_compute_wm_results(struct drm_device *dev,
2287 const struct intel_pipe_wm *merged,
2288 enum intel_ddb_partitioning partitioning,
2289 struct ilk_wm_values *results)
2290 {
2291 struct intel_crtc *intel_crtc;
2292 int level, wm_lp;
2293
2294 results->enable_fbc_wm = merged->fbc_wm_enabled;
2295 results->partitioning = partitioning;
2296
2297 /* LP1+ register values */
2298 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2299 const struct intel_wm_level *r;
2300
2301 level = ilk_wm_lp_to_level(wm_lp, merged);
2302
2303 r = &merged->wm[level];
2304
2305 /*
2306 * Maintain the watermark values even if the level is
2307 * disabled. Doing otherwise could cause underruns.
2308 */
2309 results->wm_lp[wm_lp - 1] =
2310 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2311 (r->pri_val << WM1_LP_SR_SHIFT) |
2312 r->cur_val;
2313
2314 if (r->enable)
2315 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2316
2317 if (INTEL_INFO(dev)->gen >= 8)
2318 results->wm_lp[wm_lp - 1] |=
2319 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2320 else
2321 results->wm_lp[wm_lp - 1] |=
2322 r->fbc_val << WM1_LP_FBC_SHIFT;
2323
2324 /*
2325 * Always set WM1S_LP_EN when spr_val != 0, even if the
2326 * level is disabled. Doing otherwise could cause underruns.
2327 */
2328 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2329 WARN_ON(wm_lp != 1);
2330 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2331 } else {
2332 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
}
2333 }
2334
2335 /* LP0 register values */
2336 for_each_intel_crtc(dev, intel_crtc) {
2337 enum pipe pipe = intel_crtc->pipe;
2338 const struct intel_wm_level *r =
2339 &intel_crtc->wm.active.wm[0];
2340
2341 if (WARN_ON(!r->enable))
2342 continue;
2343
2344 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2345
2346 results->wm_pipe[pipe] =
2347 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2348 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2349 r->cur_val;
2350 }
2351 }
2352
2353 /* Find the result with the highest enabled level. If both peak at the same
2354 * level, prefer the one with FBC WM enabled; if still tied, prefer r1. */
2355 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2356 struct intel_pipe_wm *r1,
2357 struct intel_pipe_wm *r2)
2358 {
2359 int level, max_level = ilk_wm_max_level(dev);
2360 int level1 = 0, level2 = 0;
2361
2362 for (level = 1; level <= max_level; level++) {
2363 if (r1->wm[level].enable)
2364 level1 = level;
2365 if (r2->wm[level].enable)
2366 level2 = level;
2367 }
2368
2369 if (level1 == level2) {
2370 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2371 return r2;
2372 else
2373 return r1;
2374 } else if (level1 > level2) {
2375 return r1;
2376 } else {
2377 return r2;
2378 }
2379 }
2380
2381 /* dirty bits used to track which watermarks need changes */
2382 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2383 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2384 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2385 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2386 #define WM_DIRTY_FBC (1 << 24)
2387 #define WM_DIRTY_DDB (1 << 25)
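
/*
 * The resulting layout, assuming at most three pipes: per-pipe WM bits
 * 0-2, linetime bits 8-10, LP1-LP3 bits 16-18, FBC bit 24 and DDB
 * partitioning bit 25.
 */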
2388
2389 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2390 const struct ilk_wm_values *old,
2391 const struct ilk_wm_values *new)
2392 {
2393 unsigned int dirty = 0;
2394 enum pipe pipe;
2395 int wm_lp;
2396
2397 for_each_pipe(dev_priv, pipe) {
2398 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2399 dirty |= WM_DIRTY_LINETIME(pipe);
2400 /* Must disable LP1+ watermarks too */
2401 dirty |= WM_DIRTY_LP_ALL;
2402 }
2403
2404 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2405 dirty |= WM_DIRTY_PIPE(pipe);
2406 /* Must disable LP1+ watermarks too */
2407 dirty |= WM_DIRTY_LP_ALL;
2408 }
2409 }
2410
2411 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2412 dirty |= WM_DIRTY_FBC;
2413 /* Must disable LP1+ watermarks too */
2414 dirty |= WM_DIRTY_LP_ALL;
2415 }
2416
2417 if (old->partitioning != new->partitioning) {
2418 dirty |= WM_DIRTY_DDB;
2419 /* Must disable LP1+ watermarks too */
2420 dirty |= WM_DIRTY_LP_ALL;
2421 }
2422
2423 /* LP1+ watermarks already deemed dirty, no need to continue */
2424 if (dirty & WM_DIRTY_LP_ALL)
2425 return dirty;
2426
2427 /* Find the lowest numbered LP1+ watermark in need of an update... */
2428 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2429 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2430 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2431 break;
2432 }
2433
2434 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2435 for (; wm_lp <= 3; wm_lp++)
2436 dirty |= WM_DIRTY_LP(wm_lp);
2437
2438 return dirty;
2439 }
2440
2441 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2442 unsigned int dirty)
2443 {
2444 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2445 bool changed = false;
2446
2447 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2448 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2449 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2450 changed = true;
2451 }
2452 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2453 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2454 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2455 changed = true;
2456 }
2457 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2458 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2459 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2460 changed = true;
2461 }
2462
2463 /*
2464 * Don't touch WM1S_LP_EN here.
2465 * Doing so could cause underruns.
2466 */
2467
2468 return changed;
2469 }
2470
2471 /*
2472 * The spec says we shouldn't write when we don't need to, because every
2473 * write causes the WMs to be re-evaluated, expending some power.
2474 */
2475 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2476 struct ilk_wm_values *results)
2477 {
2478 struct drm_device *dev = dev_priv->dev;
2479 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2480 unsigned int dirty;
2481 uint32_t val;
2482
2483 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2484 if (!dirty)
2485 return;
2486
2487 _ilk_disable_lp_wm(dev_priv, dirty);
2488
2489 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2490 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2491 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2492 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2493 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2494 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2495
2496 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2497 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2498 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2499 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2500 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2501 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2502
2503 if (dirty & WM_DIRTY_DDB) {
2504 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2505 val = I915_READ(WM_MISC);
2506 if (results->partitioning == INTEL_DDB_PART_1_2)
2507 val &= ~WM_MISC_DATA_PARTITION_5_6;
2508 else
2509 val |= WM_MISC_DATA_PARTITION_5_6;
2510 I915_WRITE(WM_MISC, val);
2511 } else {
2512 val = I915_READ(DISP_ARB_CTL2);
2513 if (results->partitioning == INTEL_DDB_PART_1_2)
2514 val &= ~DISP_DATA_PARTITION_5_6;
2515 else
2516 val |= DISP_DATA_PARTITION_5_6;
2517 I915_WRITE(DISP_ARB_CTL2, val);
2518 }
2519 }
2520
2521 if (dirty & WM_DIRTY_FBC) {
2522 val = I915_READ(DISP_ARB_CTL);
2523 if (results->enable_fbc_wm)
2524 val &= ~DISP_FBC_WM_DIS;
2525 else
2526 val |= DISP_FBC_WM_DIS;
2527 I915_WRITE(DISP_ARB_CTL, val);
2528 }
2529
2530 if (dirty & WM_DIRTY_LP(1) &&
2531 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2532 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2533
2534 if (INTEL_INFO(dev)->gen >= 7) {
2535 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2536 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2537 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2538 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2539 }
2540
2541 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2542 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2543 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2544 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2545 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2546 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2547
2548 dev_priv->wm.hw = *results;
2549 }
2550
2551 static bool ilk_disable_lp_wm(struct drm_device *dev)
2552 {
2553 struct drm_i915_private *dev_priv = dev->dev_private;
2554
2555 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2556 }
2557
2558 /*
2559 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
2560 * different active planes.
2561 */
2562
2563 #define SKL_DDB_SIZE 896 /* in blocks */
2564 #define BXT_DDB_SIZE 512
2565
2566 static void
2567 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2568 struct drm_crtc *for_crtc,
2569 const struct intel_wm_config *config,
2570 const struct skl_pipe_wm_parameters *params,
2571 struct skl_ddb_entry *alloc /* out */)
2572 {
2573 struct drm_crtc *crtc;
2574 unsigned int pipe_size, ddb_size;
2575 int nth_active_pipe;
2576
2577 if (!params->active) {
2578 alloc->start = 0;
2579 alloc->end = 0;
2580 return;
2581 }
2582
2583 if (IS_BROXTON(dev))
2584 ddb_size = BXT_DDB_SIZE;
2585 else
2586 ddb_size = SKL_DDB_SIZE;
2587
2588 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2589
2590 nth_active_pipe = 0;
2591 for_each_crtc(dev, crtc) {
2592 if (!to_intel_crtc(crtc)->active)
2593 continue;
2594
2595 if (crtc == for_crtc)
2596 break;
2597
2598 nth_active_pipe++;
2599 }
2600
2601 pipe_size = ddb_size / config->num_pipes_active;
2602 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
2603 alloc->end = alloc->start + pipe_size;
2604 }
2605
2606 static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
2607 {
2608 if (config->num_pipes_active == 1)
2609 return 32;
2610
2611 return 8;
2612 }
2613
2614 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
2615 {
2616 entry->start = reg & 0x3ff;
2617 entry->end = (reg >> 16) & 0x3ff;
2618 if (entry->end)
2619 entry->end += 1;
2620 }
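
/*
 * An example decode with a hypothetical register value: reg =
 * 0x00a70050 yields start = 0x050 = 80 and an inclusive end field of
 * 0x0a7 = 167, so after the +1 adjustment the entry covers blocks
 * [80, 168). A zero end field marks an unused entry and is left as
 * [0, 0).
 */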
2621
2622 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2623 struct skl_ddb_allocation *ddb /* out */)
2624 {
2625 enum pipe pipe;
2626 int plane;
2627 u32 val;
2628
2629 for_each_pipe(dev_priv, pipe) {
2630 for_each_plane(dev_priv, pipe, plane) {
2631 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
2632 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
2633 val);
2634 }
2635
2636 val = I915_READ(CUR_BUF_CFG(pipe));
2637 skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
2638 }
2639 }
2640
2641 static unsigned int
2642 skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
2643 {
2644 return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
2645 }
2646
2647 /*
2648 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
2649 * an 8192x4096@32bpp framebuffer:
2650 * 3 * 4096 * 8192 * 4 = 402,653,184 < 2^32
2651 */
2652 static unsigned int
2653 skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
2654 const struct skl_pipe_wm_parameters *params)
2655 {
2656 unsigned int total_data_rate = 0;
2657 int plane;
2658
2659 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
2660 const struct intel_plane_wm_parameters *p;
2661
2662 p = &params->plane[plane];
2663 if (!p->enabled)
2664 continue;
2665
2666 total_data_rate += skl_plane_relative_data_rate(p);
2667 }
2668
2669 return total_data_rate;
2670 }
2671
2672 static void
2673 skl_allocate_pipe_ddb(struct drm_crtc *crtc,
2674 const struct intel_wm_config *config,
2675 const struct skl_pipe_wm_parameters *params,
2676 struct skl_ddb_allocation *ddb /* out */)
2677 {
2678 struct drm_device *dev = crtc->dev;
2679 struct drm_i915_private *dev_priv = dev->dev_private;
2680 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2681 enum pipe pipe = intel_crtc->pipe;
2682 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
2683 uint16_t alloc_size, start, cursor_blocks;
2684 uint16_t minimum[I915_MAX_PLANES];
2685 unsigned int total_data_rate;
2686 int plane;
2687
2688 skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
2689 alloc_size = skl_ddb_entry_size(alloc);
2690 if (alloc_size == 0) {
2691 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
2692 memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
2693 return;
2694 }
2695
2696 cursor_blocks = skl_cursor_allocation(config);
2697 ddb->cursor[pipe].start = alloc->end - cursor_blocks;
2698 ddb->cursor[pipe].end = alloc->end;
2699
2700 alloc_size -= cursor_blocks;
2701 alloc->end -= cursor_blocks;
2702
2703 /* 1. Allocate the minimum required blocks for each active plane */
2704 for_each_plane(dev_priv, pipe, plane) {
2705 const struct intel_plane_wm_parameters *p;
2706
2707 p = &params->plane[plane];
2708 if (!p->enabled)
2709 continue;
2710
2711 minimum[plane] = 8;
2712 alloc_size -= minimum[plane];
2713 }
2714
2715 /*
2716 * 2. Distribute the remaining space in proportion to the amount of
2717 * data each plane needs to fetch from memory.
2718 *
2719 * FIXME: we may not allocate every single block here.
2720 */
2721 total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
2722
2723 start = alloc->start;
2724 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
2725 const struct intel_plane_wm_parameters *p;
2726 unsigned int data_rate;
2727 uint16_t plane_blocks;
2728
2729 p = &params->plane[plane];
2730 if (!p->enabled)
2731 continue;
2732
2733 data_rate = skl_plane_relative_data_rate(p);
2734
2735 /*
2736 * promote the expression to 64 bits to avoid overflow; the
2737 * result is < alloc_size since data_rate / total_data_rate < 1
2738 */
2739 plane_blocks = minimum[plane];
2740 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
2741 total_data_rate);
2742
2743 ddb->plane[pipe][plane].start = start;
2744 ddb->plane[pipe][plane].end = start + plane_blocks;
2745
2746 start += plane_blocks;
2747 }
2749 }
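
/*
 * A worked allocation with hypothetical numbers: a single active SKL
 * pipe (892 usable blocks), a 32-block cursor allocation and two
 * enabled planes with a 2:1 data-rate ratio. After the 2 * 8 minimum
 * blocks are reserved, 844 blocks remain, so plane 0 receives
 * 8 + 844 * 2 / 3 = 570 blocks and plane 1 receives
 * 8 + 844 / 3 = 289; one block stays unallocated because of the
 * truncating division, matching the FIXME above.
 */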
2750
2751 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
2752 {
2753 /* TODO: Take into account the scalers once we support them */
2754 return config->base.adjusted_mode.crtc_clock;
2755 }
2756
2757 /*
2758 * The max latency should be 257us (the max the punit can encode is 255 and
2759 * we add 2us for the read latency) and bytes_per_pixel should always be
2760 * <= 8, which allows a pixel_rate of up to ~2 GHz. That is sufficient since
2761 * the max 2xcdclk is 1350 MHz and the pixel rate should never exceed it.
2762 */
2763 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2764 uint32_t latency)
2765 {
2766 uint32_t wm_intermediate_val, ret;
2767
2768 if (latency == 0)
2769 return UINT_MAX;
2770
2771 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
2772 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
2773
2774 return ret;
2775 }
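
/*
 * A sanity check with hypothetical inputs (pixel_rate = 148500 kHz,
 * bytes_per_pixel = 4, latency = 15 us):
 * 15 * 148500 * 4 / 512 = 17402, and DIV_ROUND_UP(17402, 1000) = 18,
 * i.e. eighteen 512-byte blocks are fetched during the latency window.
 */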
2776
2777 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2778 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2779 uint64_t tiling, uint32_t latency)
2780 {
2781 uint32_t ret;
2782 uint32_t plane_bytes_per_line, plane_blocks_per_line;
2783 uint32_t wm_intermediate_val;
2784
2785 if (latency == 0)
2786 return UINT_MAX;
2787
2788 plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
2789
2790 if (tiling == I915_FORMAT_MOD_Y_TILED ||
2791 tiling == I915_FORMAT_MOD_Yf_TILED) {
2792 plane_bytes_per_line *= 4;
2793 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2794 plane_blocks_per_line /= 4;
2795 } else {
2796 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2797 }
2798
2799 wm_intermediate_val = latency * pixel_rate;
2800 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
2801 plane_blocks_per_line;
2802
2803 return ret;
2804 }
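
/*
 * An example with hypothetical inputs (pixel_rate = 148500 kHz,
 * pipe_htotal = 2200, 1366 horizontal pixels at 4 Bpp, latency =
 * 15 us): a linear plane needs DIV_ROUND_UP(5464, 512) = 11 blocks
 * per line, while the Y-tiled path computes blocks over a 4-line
 * chunk first, DIV_ROUND_UP(21856, 512) / 4 = 10, and then both are
 * multiplied by the lines covered by the latency,
 * DIV_ROUND_UP(15 * 148500, 2200 * 1000) = 2.
 */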
2805
2806 static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
2807 const struct intel_crtc *intel_crtc)
2808 {
2809 struct drm_device *dev = intel_crtc->base.dev;
2810 struct drm_i915_private *dev_priv = dev->dev_private;
2811 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
2812 enum pipe pipe = intel_crtc->pipe;
2813
2814 if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
2815 sizeof(new_ddb->plane[pipe])))
2816 return true;
2817
2818 if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
2819 sizeof(new_ddb->cursor[pipe])))
2820 return true;
2821
2822 return false;
2823 }
2824
2825 static void skl_compute_wm_global_parameters(struct drm_device *dev,
2826 struct intel_wm_config *config)
2827 {
2828 struct drm_crtc *crtc;
2829 struct drm_plane *plane;
2830
2831 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2832 config->num_pipes_active += to_intel_crtc(crtc)->active;
2833
2834 /* FIXME: I don't think we need those two global parameters on SKL */
2835 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2836 struct intel_plane *intel_plane = to_intel_plane(plane);
2837
2838 config->sprites_enabled |= intel_plane->wm.enabled;
2839 config->sprites_scaled |= intel_plane->wm.scaled;
2840 }
2841 }
2842
2843 static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
2844 struct skl_pipe_wm_parameters *p)
2845 {
2846 struct drm_device *dev = crtc->dev;
2847 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2848 enum pipe pipe = intel_crtc->pipe;
2849 struct drm_plane *plane;
2850 struct drm_framebuffer *fb;
2851 int i = 1; /* index at which sprite planes start */
2852
2853 p->active = intel_crtc->active;
2854 if (p->active) {
2855 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
2856 p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
2857
2858 fb = crtc->primary->state->fb;
2859 if (fb) {
2860 p->plane[0].enabled = true;
2861 p->plane[0].bytes_per_pixel = fb->bits_per_pixel / 8;
2862 p->plane[0].tiling = fb->modifier[0];
2863 } else {
2864 p->plane[0].enabled = false;
2865 p->plane[0].bytes_per_pixel = 0;
2866 p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
2867 }
2868 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
2869 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
2870 p->plane[0].rotation = crtc->primary->state->rotation;
2871
2872 fb = crtc->cursor->state->fb;
2873 if (fb) {
2874 p->cursor.enabled = true;
2875 p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
2876 p->cursor.horiz_pixels = crtc->cursor->state->crtc_w;
2877 p->cursor.vert_pixels = crtc->cursor->state->crtc_h;
2878 } else {
2879 p->cursor.enabled = false;
2880 p->cursor.bytes_per_pixel = 0;
2881 p->cursor.horiz_pixels = 64;
2882 p->cursor.vert_pixels = 64;
2883 }
2884 }
2885
2886 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2887 struct intel_plane *intel_plane = to_intel_plane(plane);
2888
2889 if (intel_plane->pipe == pipe &&
2890 plane->type == DRM_PLANE_TYPE_OVERLAY)
2891 p->plane[i++] = intel_plane->wm;
2892 }
2893 }
2894
2895 static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
2896 struct skl_pipe_wm_parameters *p,
2897 struct intel_plane_wm_parameters *p_params,
2898 uint16_t ddb_allocation,
2899 int level,
2900 uint16_t *out_blocks, /* out */
2901 uint8_t *out_lines /* out */)
2902 {
2903 uint32_t latency = dev_priv->wm.skl_latency[level];
2904 uint32_t method1, method2;
2905 uint32_t plane_bytes_per_line, plane_blocks_per_line;
2906 uint32_t res_blocks, res_lines;
2907 uint32_t selected_result;
2908
2909 if (latency == 0 || !p->active || !p_params->enabled)
2910 return false;
2911
2912 method1 = skl_wm_method1(p->pixel_rate,
2913 p_params->bytes_per_pixel,
2914 latency);
2915 method2 = skl_wm_method2(p->pixel_rate,
2916 p->pipe_htotal,
2917 p_params->horiz_pixels,
2918 p_params->bytes_per_pixel,
2919 p_params->tiling,
2920 latency);
2921
2922 plane_bytes_per_line = p_params->horiz_pixels *
2923 p_params->bytes_per_pixel;
2924 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2925
2926 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
2927 p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
2928 uint32_t min_scanlines = 4;
2929 uint32_t y_tile_minimum;
2930 if (intel_rotation_90_or_270(p_params->rotation)) {
2931 switch (p_params->bytes_per_pixel) {
2932 case 1:
2933 min_scanlines = 16;
2934 break;
2935 case 2:
2936 min_scanlines = 8;
2937 break;
2938 case 8:
2939 WARN(1, "Unsupported pixel depth for rotation");
2940 }
2941 }
2942 y_tile_minimum = plane_blocks_per_line * min_scanlines;
2943 selected_result = max(method2, y_tile_minimum);
2944 } else {
2945 if ((ddb_allocation / plane_blocks_per_line) >= 1)
2946 selected_result = min(method1, method2);
2947 else
2948 selected_result = method1;
2949 }
2950
2951 res_blocks = selected_result + 1;
2952 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
2953
2954 if (level >= 1 && level <= 7) {
2955 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
2956 p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
2957 res_lines += 4;
2958 else
2959 res_blocks++;
2960 }
2961
2962 if (res_blocks >= ddb_allocation || res_lines > 31)
2963 return false;
2964
2965 *out_blocks = res_blocks;
2966 *out_lines = res_lines;
2967
2968 return true;
2969 }
2970
2971 static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
2972 struct skl_ddb_allocation *ddb,
2973 struct skl_pipe_wm_parameters *p,
2974 enum pipe pipe,
2975 int level,
2976 int num_planes,
2977 struct skl_wm_level *result)
2978 {
2979 uint16_t ddb_blocks;
2980 int i;
2981
2982 for (i = 0; i < num_planes; i++) {
2983 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
2984
2985 result->plane_en[i] = skl_compute_plane_wm(dev_priv,
2986 p, &p->plane[i],
2987 ddb_blocks,
2988 level,
2989 &result->plane_res_b[i],
2990 &result->plane_res_l[i]);
2991 }
2992
2993 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
2994 result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
2995 ddb_blocks, level,
2996 &result->cursor_res_b,
2997 &result->cursor_res_l);
2998 }
2999
3000 static uint32_t
3001 skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
3002 {
3003 if (!to_intel_crtc(crtc)->active)
3004 return 0;
3005
3006 return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
3008 }
3009
3010 static void skl_compute_transition_wm(struct drm_crtc *crtc,
3011 struct skl_pipe_wm_parameters *params,
3012 struct skl_wm_level *trans_wm /* out */)
3013 {
3014 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3015 int i;
3016
3017 if (!params->active)
3018 return;
3019
3020 /* Until we know more, just disable transition WMs */
3021 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3022 trans_wm->plane_en[i] = false;
3023 trans_wm->cursor_en = false;
3024 }
3025
3026 static void skl_compute_pipe_wm(struct drm_crtc *crtc,
3027 struct skl_ddb_allocation *ddb,
3028 struct skl_pipe_wm_parameters *params,
3029 struct skl_pipe_wm *pipe_wm)
3030 {
3031 struct drm_device *dev = crtc->dev;
3032 const struct drm_i915_private *dev_priv = dev->dev_private;
3033 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3034 int level, max_level = ilk_wm_max_level(dev);
3035
3036 for (level = 0; level <= max_level; level++) {
3037 skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
3038 level, intel_num_planes(intel_crtc),
3039 &pipe_wm->wm[level]);
3040 }
3041 pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
3042
3043 skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
3044 }
3045
3046 static void skl_compute_wm_results(struct drm_device *dev,
3047 struct skl_pipe_wm_parameters *p,
3048 struct skl_pipe_wm *p_wm,
3049 struct skl_wm_values *r,
3050 struct intel_crtc *intel_crtc)
3051 {
3052 int level, max_level = ilk_wm_max_level(dev);
3053 enum pipe pipe = intel_crtc->pipe;
3054 uint32_t temp;
3055 int i;
3056
3057 for (level = 0; level <= max_level; level++) {
3058 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3059 temp = 0;
3060
3061 temp |= p_wm->wm[level].plane_res_l[i] <<
3062 PLANE_WM_LINES_SHIFT;
3063 temp |= p_wm->wm[level].plane_res_b[i];
3064 if (p_wm->wm[level].plane_en[i])
3065 temp |= PLANE_WM_EN;
3066
3067 r->plane[pipe][i][level] = temp;
3068 }
3069
3070 temp = 0;
3071
3072 temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
3073 temp |= p_wm->wm[level].cursor_res_b;
3074
3075 if (p_wm->wm[level].cursor_en)
3076 temp |= PLANE_WM_EN;
3077
3078 r->cursor[pipe][level] = temp;
3080 }
3081
3082 /* transition WMs */
3083 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3084 temp = 0;
3085 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3086 temp |= p_wm->trans_wm.plane_res_b[i];
3087 if (p_wm->trans_wm.plane_en[i])
3088 temp |= PLANE_WM_EN;
3089
3090 r->plane_trans[pipe][i] = temp;
3091 }
3092
3093 temp = 0;
3094 temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
3095 temp |= p_wm->trans_wm.cursor_res_b;
3096 if (p_wm->trans_wm.cursor_en)
3097 temp |= PLANE_WM_EN;
3098
3099 r->cursor_trans[pipe] = temp;
3100
3101 r->wm_linetime[pipe] = p_wm->linetime;
3102 }
3103
3104 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
3105 const struct skl_ddb_entry *entry)
3106 {
3107 if (entry->end)
3108 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3109 else
3110 I915_WRITE(reg, 0);
3111 }
3112
3113 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3114 const struct skl_wm_values *new)
3115 {
3116 struct drm_device *dev = dev_priv->dev;
3117 struct intel_crtc *crtc;
3118
3119 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
3120 int i, level, max_level = ilk_wm_max_level(dev);
3121 enum pipe pipe = crtc->pipe;
3122
3123 if (!new->dirty[pipe])
3124 continue;
3125
3126 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
3127
3128 for (level = 0; level <= max_level; level++) {
3129 for (i = 0; i < intel_num_planes(crtc); i++)
3130 I915_WRITE(PLANE_WM(pipe, i, level),
3131 new->plane[pipe][i][level]);
3132 I915_WRITE(CUR_WM(pipe, level),
3133 new->cursor[pipe][level]);
3134 }
3135 for (i = 0; i < intel_num_planes(crtc); i++)
3136 I915_WRITE(PLANE_WM_TRANS(pipe, i),
3137 new->plane_trans[pipe][i]);
3138 I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
3139
3140 for (i = 0; i < intel_num_planes(crtc); i++)
3141 skl_ddb_entry_write(dev_priv,
3142 PLANE_BUF_CFG(pipe, i),
3143 &new->ddb.plane[pipe][i]);
3144
3145 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3146 &new->ddb.cursor[pipe]);
3147 }
3148 }
3149
3150 /*
3151 * When setting up a new DDB allocation arrangement, we need to correctly
3152 * sequence the times at which the new allocations for the pipes are taken into
3153 * account or we'll have pipes fetching from space previously allocated to
3154 * another pipe.
3155 *
3156 * Roughly the sequence looks like:
3157 * 1. re-allocate the pipe(s) with the allocation being reduced and not
3158 * overlapping with a previous light-up pipe (another way to put it is:
3159 * pipes with their new allocation strictly included in their old ones).
3160 * 2. re-allocate the other pipes that get their allocation reduced
3161 * 3. allocate the pipes having their allocation increased
3162 *
3163 * Steps 1. and 2. are here to take care of the following case:
3164 * - Initially DDB looks like this:
3165 * | B | C |
3166 * - enable pipe A.
3167 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
3168 * allocation
3169 * | A | B | C |
3170 *
3171 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
3172 */
3173
3174 static void
3175 skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
3176 {
3177 int plane;
3178
3179 DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
3180
3181 for_each_plane(dev_priv, pipe, plane) {
3182 I915_WRITE(PLANE_SURF(pipe, plane),
3183 I915_READ(PLANE_SURF(pipe, plane)));
3184 }
3185 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3186 }
3187
3188 static bool
3189 skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
3190 const struct skl_ddb_allocation *new,
3191 enum pipe pipe)
3192 {
3193 uint16_t old_size, new_size;
3194
3195 old_size = skl_ddb_entry_size(&old->pipe[pipe]);
3196 new_size = skl_ddb_entry_size(&new->pipe[pipe]);
3197
3198 return old_size != new_size &&
3199 new->pipe[pipe].start >= old->pipe[pipe].start &&
3200 new->pipe[pipe].end <= old->pipe[pipe].end;
3201 }
3202
3203 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3204 struct skl_wm_values *new_values)
3205 {
3206 struct drm_device *dev = dev_priv->dev;
3207 struct skl_ddb_allocation *cur_ddb, *new_ddb;
3208 bool reallocated[I915_MAX_PIPES] = {};
3209 struct intel_crtc *crtc;
3210 enum pipe pipe;
3211
3212 new_ddb = &new_values->ddb;
3213 cur_ddb = &dev_priv->wm.skl_hw.ddb;
3214
3215 /*
3216 * First pass: flush the pipes with the new allocation contained into
3217 * the old space.
3218 *
3219 * We'll wait for the vblank on those pipes to ensure we can safely
3220 * re-allocate the freed space without those pipes still fetching from it.
3221 */
3222 for_each_intel_crtc(dev, crtc) {
3223 if (!crtc->active)
3224 continue;
3225
3226 pipe = crtc->pipe;
3227
3228 if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
3229 continue;
3230
3231 skl_wm_flush_pipe(dev_priv, pipe, 1);
3232 intel_wait_for_vblank(dev, pipe);
3233
3234 reallocated[pipe] = true;
3235 }
3236
3238 /*
3239 * Second pass: flush the pipes that are having their allocation
3240 * reduced, but overlapping with a previous allocation.
3241 *
3242 * Here as well we need to wait for the vblank to make sure the freed
3243 * space is not used anymore.
3244 */
3245 for_each_intel_crtc(dev, crtc) {
3246 if (!crtc->active)
3247 continue;
3248
3249 pipe = crtc->pipe;
3250
3251 if (reallocated[pipe])
3252 continue;
3253
3254 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
3255 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
3256 skl_wm_flush_pipe(dev_priv, pipe, 2);
3257 intel_wait_for_vblank(dev, pipe);
3258 reallocated[pipe] = true;
3259 }
3260 }
3261
3262 /*
3263 * Third pass: flush the pipes that got more space allocated.
3264 *
3265 * We don't need to actively wait for the update here, next vblank
3266 * will just get more DDB space with the correct WM values.
3267 */
3268 for_each_intel_crtc(dev, crtc) {
3269 if (!crtc->active)
3270 continue;
3271
3272 pipe = crtc->pipe;
3273
3274 /*
3275 * At this point, only the pipes with more space than before are
3276 * left to re-allocate.
3277 */
3278 if (reallocated[pipe])
3279 continue;
3280
3281 skl_wm_flush_pipe(dev_priv, pipe, 3);
3282 }
3283 }
3284
3285 static bool skl_update_pipe_wm(struct drm_crtc *crtc,
3286 struct skl_pipe_wm_parameters *params,
3287 struct intel_wm_config *config,
3288 struct skl_ddb_allocation *ddb, /* out */
3289 struct skl_pipe_wm *pipe_wm /* out */)
3290 {
3291 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3292
3293 skl_compute_wm_pipe_parameters(crtc, params);
3294 skl_allocate_pipe_ddb(crtc, config, params, ddb);
3295 skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
3296
3297 if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
3298 return false;
3299
3300 intel_crtc->wm.skl_active = *pipe_wm;
3301 return true;
3302 }
3303
3304 static void skl_update_other_pipe_wm(struct drm_device *dev,
3305 struct drm_crtc *crtc,
3306 struct intel_wm_config *config,
3307 struct skl_wm_values *r)
3308 {
3309 struct intel_crtc *intel_crtc;
3310 struct intel_crtc *this_crtc = to_intel_crtc(crtc);
3311
3312 /*
3313 * If the WM update hasn't changed the allocation for this_crtc (the
3314 * crtc we are currently computing the new WM values for), other
3315 * enabled crtcs will keep the same allocation and we don't need to
3316 * recompute anything for them.
3317 */
3318 if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
3319 return;
3320
3321 /*
3322 * Otherwise, because of this_crtc being freshly enabled/disabled, the
3323 * other active pipes need new DDB allocation and WM values.
3324 */
3325 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
3326 base.head) {
3327 struct skl_pipe_wm_parameters params = {};
3328 struct skl_pipe_wm pipe_wm = {};
3329 bool wm_changed;
3330
3331 if (this_crtc->pipe == intel_crtc->pipe)
3332 continue;
3333
3334 if (!intel_crtc->active)
3335 continue;
3336
3337 wm_changed = skl_update_pipe_wm(&intel_crtc->base,
3338 &params, config,
3339 &r->ddb, &pipe_wm);
3340
3341 /*
3342 * If we end up re-computing the other pipe WM values, it's
3343 * because it was really needed, so we expect the WM values to
3344 * be different.
3345 */
3346 WARN_ON(!wm_changed);
3347
3348 skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
3349 r->dirty[intel_crtc->pipe] = true;
3350 }
3351 }
3352
3353 static void skl_update_wm(struct drm_crtc *crtc)
3354 {
3355 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3356 struct drm_device *dev = crtc->dev;
3357 struct drm_i915_private *dev_priv = dev->dev_private;
3358 struct skl_pipe_wm_parameters params = {};
3359 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3360 struct skl_pipe_wm pipe_wm = {};
3361 struct intel_wm_config config = {};
3362
3363 memset(results, 0, sizeof(*results));
3364
3365 skl_compute_wm_global_parameters(dev, &config);
3366
3367 if (!skl_update_pipe_wm(crtc, &params, &config,
3368 &results->ddb, &pipe_wm))
3369 return;
3370
3371 skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
3372 results->dirty[intel_crtc->pipe] = true;
3373
3374 skl_update_other_pipe_wm(dev, crtc, &config, results);
3375 skl_write_wm_values(dev_priv, results);
3376 skl_flush_wm_values(dev_priv, results);
3377
3378 /* store the new configuration */
3379 dev_priv->wm.skl_hw = *results;
3380 }
3381
3382 static void
3383 skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
3384 uint32_t sprite_width, uint32_t sprite_height,
3385 int pixel_size, bool enabled, bool scaled)
3386 {
3387 struct intel_plane *intel_plane = to_intel_plane(plane);
3388 struct drm_framebuffer *fb = plane->state->fb;
3389
3390 intel_plane->wm.enabled = enabled;
3391 intel_plane->wm.scaled = scaled;
3392 intel_plane->wm.horiz_pixels = sprite_width;
3393 intel_plane->wm.vert_pixels = sprite_height;
3394 intel_plane->wm.bytes_per_pixel = pixel_size;
3395 intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
3396 /*
3397 * Framebuffer can be NULL on plane disable, but it does not
3398 * matter for watermarks if we assume no tiling in that case.
3399 */
3400 if (fb)
3401 intel_plane->wm.tiling = fb->modifier[0];
3402 intel_plane->wm.rotation = plane->state->rotation;
3403
3404 skl_update_wm(crtc);
3405 }
3406
3407 static void ilk_update_wm(struct drm_crtc *crtc)
3408 {
3409 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3410 struct drm_device *dev = crtc->dev;
3411 struct drm_i915_private *dev_priv = dev->dev_private;
3412 struct ilk_wm_maximums max;
3413 struct ilk_pipe_wm_parameters params = {};
3414 struct ilk_wm_values results = {};
3415 enum intel_ddb_partitioning partitioning;
3416 struct intel_pipe_wm pipe_wm = {};
3417 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3418 struct intel_wm_config config = {};
3419
3420 ilk_compute_wm_parameters(crtc, &params);
3421
3422 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
3423
3424 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
3425 return;
3426
3427 intel_crtc->wm.active = pipe_wm;
3428
3429 ilk_compute_wm_config(dev, &config);
3430
3431 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3432 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
3433
3434 /* 5/6 split only in single pipe config on IVB+ */
3435 if (INTEL_INFO(dev)->gen >= 7 &&
3436 config.num_pipes_active == 1 && config.sprites_enabled) {
3437 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
3438 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
3439
3440 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
3441 } else {
3442 best_lp_wm = &lp_wm_1_2;
3443 }
3444
3445 partitioning = (best_lp_wm == &lp_wm_1_2) ?
3446 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
3447
3448 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
3449
3450 ilk_write_wm_values(dev_priv, &results);
3451 }
3452
3453 static void
3454 ilk_update_sprite_wm(struct drm_plane *plane,
3455 struct drm_crtc *crtc,
3456 uint32_t sprite_width, uint32_t sprite_height,
3457 int pixel_size, bool enabled, bool scaled)
3458 {
3459 struct drm_device *dev = plane->dev;
3460 struct intel_plane *intel_plane = to_intel_plane(plane);
3461
3462 intel_plane->wm.enabled = enabled;
3463 intel_plane->wm.scaled = scaled;
3464 intel_plane->wm.horiz_pixels = sprite_width;
3465 intel_plane->wm.vert_pixels = sprite_height;
3466 intel_plane->wm.bytes_per_pixel = pixel_size;
3467
3468 /*
3469 * IVB workaround: must disable low power watermarks for at least
3470 * one frame before enabling scaling. LP watermarks can be re-enabled
3471 * when scaling is disabled.
3472 *
3473 * WaCxSRDisabledForSpriteScaling:ivb
3474 */
3475 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
3476 intel_wait_for_vblank(dev, intel_plane->pipe);
3477
3478 ilk_update_wm(crtc);
3479 }
3480
3481 static void skl_pipe_wm_active_state(uint32_t val,
3482 struct skl_pipe_wm *active,
3483 bool is_transwm,
3484 bool is_cursor,
3485 int i,
3486 int level)
3487 {
3488 bool is_enabled = (val & PLANE_WM_EN) != 0;
3489
3490 if (!is_transwm) {
3491 if (!is_cursor) {
3492 active->wm[level].plane_en[i] = is_enabled;
3493 active->wm[level].plane_res_b[i] =
3494 val & PLANE_WM_BLOCKS_MASK;
3495 active->wm[level].plane_res_l[i] =
3496 (val >> PLANE_WM_LINES_SHIFT) &
3497 PLANE_WM_LINES_MASK;
3498 } else {
3499 active->wm[level].cursor_en = is_enabled;
3500 active->wm[level].cursor_res_b =
3501 val & PLANE_WM_BLOCKS_MASK;
3502 active->wm[level].cursor_res_l =
3503 (val >> PLANE_WM_LINES_SHIFT) &
3504 PLANE_WM_LINES_MASK;
3505 }
3506 } else {
3507 if (!is_cursor) {
3508 active->trans_wm.plane_en[i] = is_enabled;
3509 active->trans_wm.plane_res_b[i] =
3510 val & PLANE_WM_BLOCKS_MASK;
3511 active->trans_wm.plane_res_l[i] =
3512 (val >> PLANE_WM_LINES_SHIFT) &
3513 PLANE_WM_LINES_MASK;
3514 } else {
3515 active->trans_wm.cursor_en = is_enabled;
3516 active->trans_wm.cursor_res_b =
3517 val & PLANE_WM_BLOCKS_MASK;
3518 active->trans_wm.cursor_res_l =
3519 (val >> PLANE_WM_LINES_SHIFT) &
3520 PLANE_WM_LINES_MASK;
3521 }
3522 }
3523 }
3524
3525 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3526 {
3527 struct drm_device *dev = crtc->dev;
3528 struct drm_i915_private *dev_priv = dev->dev_private;
3529 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3530 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3531 struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
3532 enum pipe pipe = intel_crtc->pipe;
3533 int level, i, max_level;
3534 uint32_t temp;
3535
3536 max_level = ilk_wm_max_level(dev);
3537
3538 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3539
3540 for (level = 0; level <= max_level; level++) {
3541 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3542 hw->plane[pipe][i][level] =
3543 I915_READ(PLANE_WM(pipe, i, level));
3544 hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
3545 }
3546
3547 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3548 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
3549 hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
3550
3551 if (!intel_crtc->active)
3552 return;
3553
3554 hw->dirty[pipe] = true;
3555
3556 active->linetime = hw->wm_linetime[pipe];
3557
3558 for (level = 0; level <= max_level; level++) {
3559 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3560 temp = hw->plane[pipe][i][level];
3561 skl_pipe_wm_active_state(temp, active, false,
3562 false, i, level);
3563 }
3564 temp = hw->cursor[pipe][level];
3565 skl_pipe_wm_active_state(temp, active, false, true, i, level);
3566 }
3567
3568 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3569 temp = hw->plane_trans[pipe][i];
3570 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
3571 }
3572
3573 temp = hw->cursor_trans[pipe];
3574 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
3575 }
3576
3577 void skl_wm_get_hw_state(struct drm_device *dev)
3578 {
3579 struct drm_i915_private *dev_priv = dev->dev_private;
3580 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3581 struct drm_crtc *crtc;
3582
3583 skl_ddb_get_hw_state(dev_priv, ddb);
3584 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3585 skl_pipe_wm_get_hw_state(crtc);
3586 }
3587
3588 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3589 {
3590 struct drm_device *dev = crtc->dev;
3591 struct drm_i915_private *dev_priv = dev->dev_private;
3592 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3593 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3594 struct intel_pipe_wm *active = &intel_crtc->wm.active;
3595 enum pipe pipe = intel_crtc->pipe;
3596 static const unsigned int wm0_pipe_reg[] = {
3597 [PIPE_A] = WM0_PIPEA_ILK,
3598 [PIPE_B] = WM0_PIPEB_ILK,
3599 [PIPE_C] = WM0_PIPEC_IVB,
3600 };
3601
3602 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3603 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3604 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3605
3606 active->pipe_enabled = intel_crtc->active;
3607
3608 if (active->pipe_enabled) {
3609 u32 tmp = hw->wm_pipe[pipe];
3610
3611 /*
3612 * For active pipes LP0 watermark is marked as
3613 * enabled, and LP1+ watermarks as disabled since
3614 * we can't really reverse compute them in case
3615 * multiple pipes are active.
3616 */
3617 active->wm[0].enable = true;
3618 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3619 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3620 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
3621 active->linetime = hw->wm_linetime[pipe];
3622 } else {
3623 int level, max_level = ilk_wm_max_level(dev);
3624
3625 /*
3626 * For inactive pipes, all watermark levels
3627 * should be marked as enabled but zeroed,
3628 * which is what we'd compute them to.
3629 */
3630 for (level = 0; level <= max_level; level++)
3631 active->wm[level].enable = true;
3632 }
3633 }
3634
3635 void ilk_wm_get_hw_state(struct drm_device *dev)
3636 {
3637 struct drm_i915_private *dev_priv = dev->dev_private;
3638 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3639 struct drm_crtc *crtc;
3640
3641 for_each_crtc(dev, crtc)
3642 ilk_pipe_wm_get_hw_state(crtc);
3643
3644 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
3645 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
3646 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
3647
3648 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
3649 if (INTEL_INFO(dev)->gen >= 7) {
3650 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
3651 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
3652 }
3653
3654 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3655 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
3656 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3657 else if (IS_IVYBRIDGE(dev))
3658 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
3659 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3660
3661 hw->enable_fbc_wm =
3662 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
3663 }
3664
3665 /**
3666 * intel_update_watermarks - update FIFO watermark values based on current modes
3667 *
3668 * Calculate watermark values for the various WM regs based on current mode
3669 * and plane configuration.
3670 *
3671 * There are several cases to deal with here:
3672 * - normal (i.e. non-self-refresh)
3673 * - self-refresh (SR) mode
3674 * - lines are large relative to FIFO size (buffer can hold up to 2)
3675 * - lines are small relative to FIFO size (buffer can hold more than 2
3676 * lines), so need to account for TLB latency
3677 *
3678 * The normal calculation is:
3679 * watermark = dotclock * bytes per pixel * latency
3680 * where latency is platform & configuration dependent (we assume pessimal
3681 * values here).
3682 *
3683 * The SR calculation is:
3684 * watermark = (trunc(latency/line time)+1) * surface width *
3685 * bytes per pixel
3686 * where
3687 * line time = htotal / dotclock
3688 * surface width = hdisplay for normal plane and 64 for cursor
3689 * and latency is assumed to be high, as above.
3690 *
3691 * The final value programmed to the register should always be rounded up,
3692 * and include an extra 2 entries to account for clock crossings.
3693 *
3694 * We don't use the sprite, so we can ignore that. And on Crestline we have
3695 * to set the non-SR watermarks to 8.
3696 */
3697 void intel_update_watermarks(struct drm_crtc *crtc)
3698 {
3699 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
3700
3701 if (dev_priv->display.update_wm)
3702 dev_priv->display.update_wm(crtc);
3703 }
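
/*
 * Illustrative only: a minimal sketch of the "normal" method documented
 * above, with assumed units (dotclock in kHz, latency in ns, 64-byte
 * FIFO entries). The per-platform helpers in this file differ in detail;
 * this just makes the formula concrete.
 */
static unsigned int __maybe_unused
example_normal_wm_entries(unsigned int dotclock_khz,
			  unsigned int bytes_per_pixel,
			  unsigned int latency_ns)
{
	/* bytes fetched while covering the latency window */
	unsigned long bytes = (unsigned long)(dotclock_khz / 1000) *
			      bytes_per_pixel * latency_ns / 1000;

	/* round up to FIFO entries, +2 for clock domain crossings */
	return DIV_ROUND_UP(bytes, 64) + 2;
}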
3704
3705 void intel_update_sprite_watermarks(struct drm_plane *plane,
3706 struct drm_crtc *crtc,
3707 uint32_t sprite_width,
3708 uint32_t sprite_height,
3709 int pixel_size,
3710 bool enabled, bool scaled)
3711 {
3712 struct drm_i915_private *dev_priv = plane->dev->dev_private;
3713
3714 if (dev_priv->display.update_sprite_wm)
3715 dev_priv->display.update_sprite_wm(plane, crtc,
3716 sprite_width, sprite_height,
3717 pixel_size, enabled, scaled);
3718 }
3719
3720 /**
3721 * Lock protecting IPS-related data structures
3722 */
3723 DEFINE_SPINLOCK(mchdev_lock);
3724
3725 /* Global for IPS driver to get at the current i915 device. Protected by
3726 * mchdev_lock. */
3727 static struct drm_i915_private *i915_mch_dev;
3728
3729 bool ironlake_set_drps(struct drm_device *dev, u8 val)
3730 {
3731 struct drm_i915_private *dev_priv = dev->dev_private;
3732 u16 rgvswctl;
3733
3734 assert_spin_locked(&mchdev_lock);
3735
3736 rgvswctl = I915_READ16(MEMSWCTL);
3737 if (rgvswctl & MEMCTL_CMD_STS) {
3738 DRM_DEBUG("gpu busy, RCS change rejected\n");
3739 return false; /* still busy with another command */
3740 }
3741
3742 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
3743 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
3744 I915_WRITE16(MEMSWCTL, rgvswctl);
3745 POSTING_READ16(MEMSWCTL);
3746
3747 rgvswctl |= MEMCTL_CMD_STS;
3748 I915_WRITE16(MEMSWCTL, rgvswctl);
3749
3750 return true;
3751 }
3752
3753 static void ironlake_enable_drps(struct drm_device *dev)
3754 {
3755 struct drm_i915_private *dev_priv = dev->dev_private;
3756 u32 rgvmodectl = I915_READ(MEMMODECTL);
3757 u8 fmax, fmin, fstart, vstart;
3758
3759 spin_lock_irq(&mchdev_lock);
3760
3761 /* Enable temp reporting */
3762 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
3763 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
3764
3765 /* 100ms RC evaluation intervals */
3766 I915_WRITE(RCUPEI, 100000);
3767 I915_WRITE(RCDNEI, 100000);
3768
3769 /* Set max/min thresholds to 90ms and 80ms respectively */
3770 I915_WRITE(RCBMAXAVG, 90000);
3771 I915_WRITE(RCBMINAVG, 80000);
3772
3773 I915_WRITE(MEMIHYST, 1);
3774
3775 /* Set up min, max, and cur for interrupt handling */
3776 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
3777 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
3778 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
3779 MEMMODE_FSTART_SHIFT;
3780
3781 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
3782 PXVFREQ_PX_SHIFT;
3783
3784 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
3785 dev_priv->ips.fstart = fstart;
3786
3787 dev_priv->ips.max_delay = fstart;
3788 dev_priv->ips.min_delay = fmin;
3789 dev_priv->ips.cur_delay = fstart;
3790
3791 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
3792 fmax, fmin, fstart);
3793
3794 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
3795
3796 /*
3797 * Interrupts will be enabled in ironlake_irq_postinstall
3798 */
3799
3800 I915_WRITE(VIDSTART, vstart);
3801 POSTING_READ(VIDSTART);
3802
3803 rgvmodectl |= MEMMODE_SWMODE_EN;
3804 I915_WRITE(MEMMODECTL, rgvmodectl);
3805
3806 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
3807 DRM_ERROR("stuck trying to change perf mode\n");
3808 mdelay(1);
3809
3810 ironlake_set_drps(dev, fstart);
3811
3812 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
3813 I915_READ(0x112e0);
3814 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
3815 dev_priv->ips.last_count2 = I915_READ(0x112f4);
3816 dev_priv->ips.last_time2 = ktime_get_raw_ns();
3817
3818 spin_unlock_irq(&mchdev_lock);
3819 }
3820
3821 static void ironlake_disable_drps(struct drm_device *dev)
3822 {
3823 struct drm_i915_private *dev_priv = dev->dev_private;
3824 u16 rgvswctl;
3825
3826 spin_lock_irq(&mchdev_lock);
3827
3828 rgvswctl = I915_READ16(MEMSWCTL);
3829
3830 /* Ack interrupts, disable EFC interrupt */
3831 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3832 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3833 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3834 I915_WRITE(DEIIR, DE_PCU_EVENT);
3835 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3836
3837 /* Go back to the starting frequency */
3838 ironlake_set_drps(dev, dev_priv->ips.fstart);
3839 mdelay(1);
3840 rgvswctl |= MEMCTL_CMD_STS;
3841 I915_WRITE(MEMSWCTL, rgvswctl);
3842 mdelay(1);
3843
3844 spin_unlock_irq(&mchdev_lock);
3845 }
3846
3847 /* There's a funny hw issue where the hw returns all 0 when reading from
3848 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3849 * ourselves, instead of doing an rmw cycle (which might result in us clearing
3850 * all limits and leaving the gpu stuck at whatever frequency it is at right now).
3851 */
3852 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
3853 {
3854 u32 limits;
3855
3856 /* Only set the down limit when we've reached the lowest level to avoid
3857 * getting more interrupts, otherwise leave this clear. This prevents a
3858 * race in the hw when coming out of rc6: There's a tiny window where
3859 * the hw runs at the minimal clock before selecting the desired
3860 * frequency; if the down threshold expires in that window we will not
3861 * receive a down interrupt. */
3862 if (IS_GEN9(dev_priv->dev)) {
3863 limits = (dev_priv->rps.max_freq_softlimit) << 23;
3864 if (val <= dev_priv->rps.min_freq_softlimit)
3865 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
3866 } else {
3867 limits = dev_priv->rps.max_freq_softlimit << 24;
3868 if (val <= dev_priv->rps.min_freq_softlimit)
3869 limits |= dev_priv->rps.min_freq_softlimit << 16;
3870 }
3871
3872 return limits;
3873 }
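
/*
 * Illustrative only: for the pre-gen9 branch above, with assumed
 * softlimits max = 0x16 and min = 0x07 and val at the minimum, the
 * computed value is 0x16 << 24 | 0x07 << 16 = 0x16070000.
 */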
3874
3875 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3876 {
3877 int new_power;
3878 u32 threshold_up = 0, threshold_down = 0; /* in % */
3879 u32 ei_up = 0, ei_down = 0;
3880
3881 new_power = dev_priv->rps.power;
3882 switch (dev_priv->rps.power) {
3883 case LOW_POWER:
3884 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
3885 new_power = BETWEEN;
3886 break;
3887
3888 case BETWEEN:
3889 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
3890 new_power = LOW_POWER;
3891 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
3892 new_power = HIGH_POWER;
3893 break;
3894
3895 case HIGH_POWER:
3896 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
3897 new_power = BETWEEN;
3898 break;
3899 }
3900 /* Max/min bins are special */
3901 if (val <= dev_priv->rps.min_freq_softlimit)
3902 new_power = LOW_POWER;
3903 if (val >= dev_priv->rps.max_freq_softlimit)
3904 new_power = HIGH_POWER;
3905 if (new_power == dev_priv->rps.power)
3906 return;
3907
3908 /* Note the units here are not exactly 1us, but 1280ns. */
3909 switch (new_power) {
3910 case LOW_POWER:
3911 /* Upclock if more than 95% busy over 16ms */
3912 ei_up = 16000;
3913 threshold_up = 95;
3914
3915 /* Downclock if less than 85% busy over 32ms */
3916 ei_down = 32000;
3917 threshold_down = 85;
3918 break;
3919
3920 case BETWEEN:
3921 /* Upclock if more than 90% busy over 13ms */
3922 ei_up = 13000;
3923 threshold_up = 90;
3924
3925 /* Downclock if less than 75% busy over 32ms */
3926 ei_down = 32000;
3927 threshold_down = 75;
3928 break;
3929
3930 case HIGH_POWER:
3931 /* Upclock if more than 85% busy over 10ms */
3932 ei_up = 10000;
3933 threshold_up = 85;
3934
3935 /* Downclock if less than 60% busy over 32ms */
3936 ei_down = 32000;
3937 threshold_down = 60;
3938 break;
3939 }
3940
3941 I915_WRITE(GEN6_RP_UP_EI,
3942 GT_INTERVAL_FROM_US(dev_priv, ei_up));
3943 I915_WRITE(GEN6_RP_UP_THRESHOLD,
3944 GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
3945
3946 I915_WRITE(GEN6_RP_DOWN_EI,
3947 GT_INTERVAL_FROM_US(dev_priv, ei_down));
3948 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
3949 GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
3950
3951 I915_WRITE(GEN6_RP_CONTROL,
3952 GEN6_RP_MEDIA_TURBO |
3953 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3954 GEN6_RP_MEDIA_IS_GFX |
3955 GEN6_RP_ENABLE |
3956 GEN6_RP_UP_BUSY_AVG |
3957 GEN6_RP_DOWN_IDLE_AVG);
3958
3959 dev_priv->rps.power = new_power;
3960 dev_priv->rps.up_threshold = threshold_up;
3961 dev_priv->rps.down_threshold = threshold_down;
3962 dev_priv->rps.last_adj = 0;
3963 }
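
/*
 * Illustrative only: the EI/threshold programming above works in hardware
 * intervals of 1280ns rather than exact microseconds (see the note at the
 * top of the switch). A rough sketch of the conversion that the real,
 * platform-aware GT_INTERVAL_FROM_US macro performs:
 */
#define EXAMPLE_US_TO_GT_INTERVAL(us) (((us) * 1000) / 1280)
/* e.g. the 16000us LOW_POWER up-interval becomes 12500 hardware units */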
3964
3965 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3966 {
3967 u32 mask = 0;
3968
3969 if (val > dev_priv->rps.min_freq_softlimit)
3970 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3971 if (val < dev_priv->rps.max_freq_softlimit)
3972 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
3973
3974 mask &= dev_priv->pm_rps_events;
3975
3976 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
3977 }
3978
3979 /* gen6_set_rps is called to update the frequency request, but should also be
3980 * called when the range (min_delay and max_delay) is modified so that we can
3981 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3982 static void gen6_set_rps(struct drm_device *dev, u8 val)
3983 {
3984 struct drm_i915_private *dev_priv = dev->dev_private;
3985
3986 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3987 WARN_ON(val > dev_priv->rps.max_freq);
3988 WARN_ON(val < dev_priv->rps.min_freq);
3989
3990 /* min/max delay may still have been modified so be sure to
3991 * write the limits value.
3992 */
3993 if (val != dev_priv->rps.cur_freq) {
3994 gen6_set_rps_thresholds(dev_priv, val);
3995
3996 if (IS_GEN9(dev))
3997 I915_WRITE(GEN6_RPNSWREQ,
3998 GEN9_FREQUENCY(val));
3999 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4000 I915_WRITE(GEN6_RPNSWREQ,
4001 HSW_FREQUENCY(val));
4002 else
4003 I915_WRITE(GEN6_RPNSWREQ,
4004 GEN6_FREQUENCY(val) |
4005 GEN6_OFFSET(0) |
4006 GEN6_AGGRESSIVE_TURBO);
4007 }
4008
4009 /* Make sure we continue to get interrupts
4010 * until we hit the minimum or maximum frequencies.
4011 */
4012 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
4013 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4014
4015 POSTING_READ(GEN6_RPNSWREQ);
4016
4017 dev_priv->rps.cur_freq = val;
4018 trace_intel_gpu_freq_change(val * 50);
4019 }
4020
4021 static void valleyview_set_rps(struct drm_device *dev, u8 val)
4022 {
4023 struct drm_i915_private *dev_priv = dev->dev_private;
4024
4025 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4026 WARN_ON(val > dev_priv->rps.max_freq);
4027 WARN_ON(val < dev_priv->rps.min_freq);
4028
4029 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
4030 "Odd GPU freq value\n"))
4031 val &= ~1;
4032
4033 if (val != dev_priv->rps.cur_freq) {
4034 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4035 if (!IS_CHERRYVIEW(dev_priv))
4036 gen6_set_rps_thresholds(dev_priv, val);
4037 }
4038
4039 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4040
4041 dev_priv->rps.cur_freq = val;
4042 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4043 }
4044
4045 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
4046 *
4047 * If Gfx is idle, then
4048 * 1. Mask Turbo interrupts
4049 * 2. Bring up Gfx clock
4050 * 3. Change the freq to Rpn and wait till P-Unit updates freq
4051 * 4. Clear the Force GFX CLK ON bit so that the Gfx clock can power down
4052 * 5. Unmask Turbo interrupts
4053 */
4054 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4055 {
4056 struct drm_device *dev = dev_priv->dev;
4057 u32 val = dev_priv->rps.idle_freq;
4058
4059 /* CHV and later VLV revisions don't need to force the gfx clock */
4060 if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
4061 valleyview_set_rps(dev_priv->dev, val);
4062 return;
4063 }
4064
4065 /*
4066 * When we are idle, drop to the minimum voltage state.
4067 */
4068
4069 if (dev_priv->rps.cur_freq <= val)
4070 return;
4071
4072 /* Mask turbo interrupts so that they will not come in between */
4073 I915_WRITE(GEN6_PMINTRMSK,
4074 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
4075
4076 vlv_force_gfx_clock(dev_priv, true);
4077
4078 dev_priv->rps.cur_freq = val;
4079
4080 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4081
4082 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
4083 & GENFREQSTATUS) == 0, 100))
4084 DRM_ERROR("timed out waiting for Punit\n");
4085
4086 gen6_set_rps_thresholds(dev_priv, val);
4087 vlv_force_gfx_clock(dev_priv, false);
4088
4089 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4090 }
4091
4092 void gen6_rps_busy(struct drm_i915_private *dev_priv)
4093 {
4094 mutex_lock(&dev_priv->rps.hw_lock);
4095 if (dev_priv->rps.enabled) {
4096 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
4097 gen6_rps_reset_ei(dev_priv);
4098 I915_WRITE(GEN6_PMINTRMSK,
4099 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4100 }
4101 mutex_unlock(&dev_priv->rps.hw_lock);
4102 }
4103
4104 void gen6_rps_idle(struct drm_i915_private *dev_priv)
4105 {
4106 struct drm_device *dev = dev_priv->dev;
4107
4108 mutex_lock(&dev_priv->rps.hw_lock);
4109 if (dev_priv->rps.enabled) {
4110 if (IS_VALLEYVIEW(dev))
4111 vlv_set_rps_idle(dev_priv);
4112 else
4113 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4114 dev_priv->rps.last_adj = 0;
4115 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4116 }
4117
4118 while (!list_empty(&dev_priv->rps.clients))
4119 list_del_init(dev_priv->rps.clients.next);
4120 mutex_unlock(&dev_priv->rps.hw_lock);
4121 }
4122
4123 void gen6_rps_boost(struct drm_i915_private *dev_priv,
4124 struct drm_i915_file_private *file_priv)
4125 {
4126 u32 val;
4127
4128 mutex_lock(&dev_priv->rps.hw_lock);
4129 val = dev_priv->rps.max_freq_softlimit;
4130 if (dev_priv->rps.enabled &&
4131 dev_priv->mm.busy &&
4132 dev_priv->rps.cur_freq < val &&
4133 (file_priv == NULL || list_empty(&file_priv->rps_boost))) {
4134 intel_set_rps(dev_priv->dev, val);
4135 dev_priv->rps.last_adj = 0;
4136
4137 if (file_priv != NULL) {
4138 list_add(&file_priv->rps_boost, &dev_priv->rps.clients);
4139 file_priv->rps_boosts++;
4140 } else
4141 dev_priv->rps.boosts++;
4142 }
4143 mutex_unlock(&dev_priv->rps.hw_lock);
4144 }
4145
4146 void intel_set_rps(struct drm_device *dev, u8 val)
4147 {
4148 if (IS_VALLEYVIEW(dev))
4149 valleyview_set_rps(dev, val);
4150 else
4151 gen6_set_rps(dev, val);
4152 }
4153
4154 static void gen9_disable_rps(struct drm_device *dev)
4155 {
4156 struct drm_i915_private *dev_priv = dev->dev_private;
4157
4158 I915_WRITE(GEN6_RC_CONTROL, 0);
4159 I915_WRITE(GEN9_PG_ENABLE, 0);
4160 }
4161
4162 static void gen6_disable_rps(struct drm_device *dev)
4163 {
4164 struct drm_i915_private *dev_priv = dev->dev_private;
4165
4166 I915_WRITE(GEN6_RC_CONTROL, 0);
4167 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4168 }
4169
4170 static void cherryview_disable_rps(struct drm_device *dev)
4171 {
4172 struct drm_i915_private *dev_priv = dev->dev_private;
4173
4174 I915_WRITE(GEN6_RC_CONTROL, 0);
4175 }
4176
4177 static void valleyview_disable_rps(struct drm_device *dev)
4178 {
4179 struct drm_i915_private *dev_priv = dev->dev_private;
4180
4181 /* We're doing forcewake before disabling RC6;
4182 * this is what the BIOS expects when going into suspend */
4183 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4184
4185 I915_WRITE(GEN6_RC_CONTROL, 0);
4186
4187 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4188 }
4189
4190 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4191 {
4192 if (IS_VALLEYVIEW(dev)) {
4193 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4194 mode = GEN6_RC_CTL_RC6_ENABLE;
4195 else
4196 mode = 0;
4197 }
4198 if (HAS_RC6p(dev))
4199 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
4200 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
4201 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
4202 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
4203
4204 else
4205 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
4206 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
4207 }
4208
4209 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
4210 {
4211 /* No RC6 before Ironlake */
4212 if (INTEL_INFO(dev)->gen < 5)
4213 return 0;
4214
4215 /* RC6 is only available on Ironlake mobile, not on desktop */
4216 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
4217 return 0;
4218
4219 /* Respect the kernel parameter if it is set */
4220 if (enable_rc6 >= 0) {
4221 int mask;
4222
4223 if (HAS_RC6p(dev))
4224 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
4225 INTEL_RC6pp_ENABLE;
4226 else
4227 mask = INTEL_RC6_ENABLE;
4228
4229 if ((enable_rc6 & mask) != enable_rc6)
4230 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
4231 enable_rc6 & mask, enable_rc6, mask);
4232
4233 return enable_rc6 & mask;
4234 }
4235
4236 /* Disable RC6 on Ironlake */
4237 if (INTEL_INFO(dev)->gen == 5)
4238 return 0;
4239
4240 if (IS_IVYBRIDGE(dev))
4241 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
4242
4243 return INTEL_RC6_ENABLE;
4244 }
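
/*
 * Illustrative only: how the mask trimming above plays out. Assuming a
 * platform without RC6p/RC6pp (so mask == INTEL_RC6_ENABLE, as on
 * Haswell), a modparam request for all three states is trimmed to RC6.
 */
static int __maybe_unused example_sanitize_rc6(void)
{
	int requested = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			INTEL_RC6pp_ENABLE;	/* 0x7 */
	int mask = INTEL_RC6_ENABLE;		/* no deep states */

	return requested & mask;		/* 0x1: RC6 only */
}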
4245
4246 int intel_enable_rc6(const struct drm_device *dev)
4247 {
4248 return i915.enable_rc6;
4249 }
4250
4251 static void gen6_init_rps_frequencies(struct drm_device *dev)
4252 {
4253 struct drm_i915_private *dev_priv = dev->dev_private;
4254 uint32_t rp_state_cap;
4255 u32 ddcc_status = 0;
4256 int ret;
4257
4258 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4259 /* All of these values are in units of 50MHz */
4260 dev_priv->rps.cur_freq = 0;
4261 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
4262 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
4263 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4264 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
4265 if (IS_SKYLAKE(dev)) {
4266 /* Store the frequency values in 16.66 MHz units, which is
4267 * the natural hardware unit for SKL */
4268 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
4269 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
4270 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
4271 }
4272 /* hw_max = RP0 until we check for overclocking */
4273 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4274
4275 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
4276 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4277 ret = sandybridge_pcode_read(dev_priv,
4278 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4279 &ddcc_status);
4280 if (ret == 0)
4281 dev_priv->rps.efficient_freq =
4282 clamp_t(u8,
4283 ((ddcc_status >> 8) & 0xff),
4284 dev_priv->rps.min_freq,
4285 dev_priv->rps.max_freq);
4286 }
4287
4288 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4289
4290 /* Preserve min/max settings in case of re-init */
4291 if (dev_priv->rps.max_freq_softlimit == 0)
4292 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4293
4294 if (dev_priv->rps.min_freq_softlimit == 0) {
4295 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4296 dev_priv->rps.min_freq_softlimit =
4297 max_t(int, dev_priv->rps.efficient_freq,
4298 intel_freq_opcode(dev_priv, 450));
4299 else
4300 dev_priv->rps.min_freq_softlimit =
4301 dev_priv->rps.min_freq;
4302 }
4303 }
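
/*
 * Illustrative only: RP_STATE_CAP fields are in 50MHz units, and on SKL
 * the stored values are additionally multiplied by GEN9_FREQ_SCALER so
 * one stored unit is 50/3 = 16.66MHz (assuming the scaler is 3). The
 * driver's real conversion helper is intel_gpu_freq(); this sketch just
 * spells out the arithmetic.
 */
static unsigned int __maybe_unused
example_rps_units_to_mhz(unsigned int stored, bool is_skl)
{
	return is_skl ? DIV_ROUND_CLOSEST(stored * 50, GEN9_FREQ_SCALER)
		      : stored * 50;
}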
4304
4305 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
4306 static void gen9_enable_rps(struct drm_device *dev)
4307 {
4308 struct drm_i915_private *dev_priv = dev->dev_private;
4309
4310 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4311
4312 gen6_init_rps_frequencies(dev);
4313
4314 /* Program defaults and thresholds for RPS*/
4315 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4316 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
4317
4318 /* 1 second timeout*/
4319 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
4320 GT_INTERVAL_FROM_US(dev_priv, 1000000));
4321
4322 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
4323
4324 /* Leaning on the below call to gen6_set_rps to program/setup the
4325 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
4326 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
4327 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4328 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4329
4330 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4331 }
4332
4333 static void gen9_enable_rc6(struct drm_device *dev)
4334 {
4335 struct drm_i915_private *dev_priv = dev->dev_private;
4336 struct intel_engine_cs *ring;
4337 uint32_t rc6_mask = 0;
4338 int unused;
4339
4340 /* 1a: Software RC state - RC0 */
4341 I915_WRITE(GEN6_RC_STATE, 0);
4342
4343 /* 1b: Get forcewake during program sequence. Although the driver
4344 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4345 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4346
4347 /* 2a: Disable RC states. */
4348 I915_WRITE(GEN6_RC_CONTROL, 0);
4349
4350 /* 2b: Program RC6 thresholds.*/
4351 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
4352 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4353 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4354 for_each_ring(ring, dev_priv, unused)
4355 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4356 I915_WRITE(GEN6_RC_SLEEP, 0);
4357 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
4358
4359 /* 2c: Program Coarse Power Gating Policies. */
4360 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
4361 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
4362
4363 /* 3a: Enable RC6 */
4364 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4365 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4366 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4367 "on" : "off");
4368 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4369 GEN6_RC_CTL_EI_MODE(1) |
4370 rc6_mask);
4371
4372 /*
4373 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
4374 * WaDisableRenderPowerGating:skl,bxt - Render PG need to be disabled with RC6.
4375 */
4376 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4377 GEN9_MEDIA_PG_ENABLE : 0);
4378
4379
4380 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4381
4382 }
4383
4384 static void gen8_enable_rps(struct drm_device *dev)
4385 {
4386 struct drm_i915_private *dev_priv = dev->dev_private;
4387 struct intel_engine_cs *ring;
4388 uint32_t rc6_mask = 0;
4389 int unused;
4390
4391 /* 1a: Software RC state - RC0 */
4392 I915_WRITE(GEN6_RC_STATE, 0);
4393
4394 /* 1c & 1d: Get forcewake during program sequence. Although the driver
4395 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4396 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4397
4398 /* 2a: Disable RC states. */
4399 I915_WRITE(GEN6_RC_CONTROL, 0);
4400
4401 /* Initialize rps frequencies */
4402 gen6_init_rps_frequencies(dev);
4403
4404 /* 2b: Program RC6 thresholds.*/
4405 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4406 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4407 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4408 for_each_ring(ring, dev_priv, unused)
4409 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4410 I915_WRITE(GEN6_RC_SLEEP, 0);
4411 if (IS_BROADWELL(dev))
4412 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
4413 else
4414 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4415
4416 /* 3: Enable RC6 */
4417 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4418 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4419 intel_print_rc6_info(dev, rc6_mask);
4420 if (IS_BROADWELL(dev))
4421 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4422 GEN7_RC_CTL_TO_MODE |
4423 rc6_mask);
4424 else
4425 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4426 GEN6_RC_CTL_EI_MODE(1) |
4427 rc6_mask);
4428
4429 /* 4 Program defaults and thresholds for RPS*/
4430 I915_WRITE(GEN6_RPNSWREQ,
4431 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4432 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4433 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4434 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
4435 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
4436
4437 /* Docs recommend 900MHz, and 300 MHz respectively */
4438 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
4439 dev_priv->rps.max_freq_softlimit << 24 |
4440 dev_priv->rps.min_freq_softlimit << 16);
4441
4442 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
4443 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
4444 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
4445 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
4446
4447 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4448
4449 /* 5: Enable RPS */
4450 I915_WRITE(GEN6_RP_CONTROL,
4451 GEN6_RP_MEDIA_TURBO |
4452 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4453 GEN6_RP_MEDIA_IS_GFX |
4454 GEN6_RP_ENABLE |
4455 GEN6_RP_UP_BUSY_AVG |
4456 GEN6_RP_DOWN_IDLE_AVG);
4457
4458 /* 6: Ring frequency + overclocking (our driver does this later) */
4459
4460 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4461 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4462
4463 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4464 }
4465
4466 static void gen6_enable_rps(struct drm_device *dev)
4467 {
4468 struct drm_i915_private *dev_priv = dev->dev_private;
4469 struct intel_engine_cs *ring;
4470 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
4471 u32 gtfifodbg;
4472 int rc6_mode;
4473 int i, ret;
4474
4475 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4476
4477 /* Here begins a magic sequence of register writes to enable
4478 * auto-downclocking.
4479 *
4480 * Perhaps there might be some value in exposing these to
4481 * userspace...
4482 */
4483 I915_WRITE(GEN6_RC_STATE, 0);
4484
4485 /* Clear the DBG now so we don't confuse earlier errors */
4486 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4487 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
4488 I915_WRITE(GTFIFODBG, gtfifodbg);
4489 }
4490
4491 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4492
4493 /* Initialize rps frequencies */
4494 gen6_init_rps_frequencies(dev);
4495
4496 /* disable the counters and set deterministic thresholds */
4497 I915_WRITE(GEN6_RC_CONTROL, 0);
4498
4499 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
4500 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
4501 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
4502 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4503 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4504
4505 for_each_ring(ring, dev_priv, i)
4506 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4507
4508 I915_WRITE(GEN6_RC_SLEEP, 0);
4509 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
4510 if (IS_IVYBRIDGE(dev))
4511 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
4512 else
4513 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
4514 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
4515 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
4516
4517 /* Check if we are enabling RC6 */
4518 rc6_mode = intel_enable_rc6(dev_priv->dev);
4519 if (rc6_mode & INTEL_RC6_ENABLE)
4520 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
4521
4522 /* We don't use those on Haswell */
4523 if (!IS_HASWELL(dev)) {
4524 if (rc6_mode & INTEL_RC6p_ENABLE)
4525 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
4526
4527 if (rc6_mode & INTEL_RC6pp_ENABLE)
4528 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
4529 }
4530
4531 intel_print_rc6_info(dev, rc6_mask);
4532
4533 I915_WRITE(GEN6_RC_CONTROL,
4534 rc6_mask |
4535 GEN6_RC_CTL_EI_MODE(1) |
4536 GEN6_RC_CTL_HW_ENABLE);
4537
4538 /* Power down if completely idle for over 50ms */
4539 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
4540 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4541
4542 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
4543 if (ret)
4544 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
4545
4546 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
4547 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
4548 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
4549 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
4550 (pcu_mbox & 0xff) * 50);
4551 dev_priv->rps.max_freq = pcu_mbox & 0xff;
4552 }
4553
4554 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4555 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
4556
4557 rc6vids = 0;
4558 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
4559 if (IS_GEN6(dev) && ret) {
4560 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
4561 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
4562 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
4563 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
4564 rc6vids &= 0xffff00;
4565 rc6vids |= GEN6_ENCODE_RC6_VID(450);
4566 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
4567 if (ret)
4568 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
4569 }
4570
4571 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4572 }
4573
4574 static void __gen6_update_ring_freq(struct drm_device *dev)
4575 {
4576 struct drm_i915_private *dev_priv = dev->dev_private;
4577 int min_freq = 15;
4578 unsigned int gpu_freq;
4579 unsigned int max_ia_freq, min_ring_freq;
4580 int scaling_factor = 180;
4581 struct cpufreq_policy *policy;
4582
4583 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4584
4585 policy = cpufreq_cpu_get(0);
4586 if (policy) {
4587 max_ia_freq = policy->cpuinfo.max_freq;
4588 cpufreq_cpu_put(policy);
4589 } else {
4590 /*
4591 * Default to measured freq if none found; the PCU will ensure we
4592 * don't go over
4593 */
4594 max_ia_freq = tsc_khz;
4595 }
4596
4597 /* Convert from kHz to MHz */
4598 max_ia_freq /= 1000;
4599
4600 min_ring_freq = I915_READ(DCLK) & 0xf;
4601 /* convert DDR frequency from units of 266.6MHz to bandwidth */
4602 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
4603
4604 /*
4605 * For each potential GPU frequency, load a ring frequency we'd like
4606 * to use for memory access. We do this by specifying the IA frequency
4607 * the PCU should use as a reference to determine the ring frequency.
4608 */
4609 for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
4610 gpu_freq--) {
4611 int diff = dev_priv->rps.max_freq - gpu_freq;
4612 unsigned int ia_freq = 0, ring_freq = 0;
4613
4614 if (INTEL_INFO(dev)->gen >= 8) {
4615 /* max(2 * GT, DDR). NB: GT is 50MHz units */
4616 ring_freq = max(min_ring_freq, gpu_freq);
4617 } else if (IS_HASWELL(dev)) {
4618 ring_freq = mult_frac(gpu_freq, 5, 4);
4619 ring_freq = max(min_ring_freq, ring_freq);
4620 /* leave ia_freq as the default, chosen by cpufreq */
4621 } else {
4622 /* On older processors, there is no separate ring
4623 * clock domain, so in order to boost the bandwidth
4624 * of the ring, we need to upclock the CPU (ia_freq).
4625 *
4626 * For GPU frequencies less than 750MHz,
4627 * just use the lowest ring freq.
4628 */
4629 if (gpu_freq < min_freq)
4630 ia_freq = 800;
4631 else
4632 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
4633 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
4634 }
4635
4636 sandybridge_pcode_write(dev_priv,
4637 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
4638 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
4639 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
4640 gpu_freq);
4641 }
4642 }
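
/*
 * Illustrative only: a worked example of the pre-Haswell branch above.
 * Assuming max_ia_freq = 3400MHz, scaling_factor = 180 and a GPU bin
 * 4 steps below max (diff = 4): ia_freq = 3400 - (4 * 180) / 2 = 3040,
 * which DIV_ROUND_CLOSEST(3040, 100) encodes as 30 for the PCU table.
 */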
4643
4644 void gen6_update_ring_freq(struct drm_device *dev)
4645 {
4646 struct drm_i915_private *dev_priv = dev->dev_private;
4647
4648 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
4649 return;
4650
4651 mutex_lock(&dev_priv->rps.hw_lock);
4652 __gen6_update_ring_freq(dev);
4653 mutex_unlock(&dev_priv->rps.hw_lock);
4654 }
4655
4656 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
4657 {
4658 struct drm_device *dev = dev_priv->dev;
4659 u32 val, rp0;
4660
4661 if (dev->pdev->revision >= 0x20) {
4662 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
4663
4664 switch (INTEL_INFO(dev)->eu_total) {
4665 case 8:
4666 /* (2 * 4) config */
4667 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
4668 break;
4669 case 12:
4670 /* (2 * 6) config */
4671 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
4672 break;
4673 case 16:
4674 /* (2 * 8) config */
4675 default:
4676 /* Use the (2 * 8) config's RP0 for any other combination */
4677 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
4678 break;
4679 }
4680 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
4681 } else {
4682 /* For pre-production hardware */
4683 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
4684 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
4685 PUNIT_GPU_STATUS_MAX_FREQ_MASK;
4686 }
4687 return rp0;
4688 }
4689
4690 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
4691 {
4692 u32 val, rpe;
4693
4694 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
4695 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
4696
4697 return rpe;
4698 }
4699
4700 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
4701 {
4702 struct drm_device *dev = dev_priv->dev;
4703 u32 val, rp1;
4704
4705 if (dev->pdev->revision >= 0x20) {
4706 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
4707 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
4708 } else {
4709 /* For pre-production hardware */
4710 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4711 rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
4712 PUNIT_GPU_STATUS_MAX_FREQ_MASK);
4713 }
4714 return rp1;
4715 }
4716
4717 static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
4718 {
4719 struct drm_device *dev = dev_priv->dev;
4720 u32 val, rpn;
4721
4722 if (dev->pdev->revision >= 0x20) {
4723 val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
4724 rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
4725 FB_GFX_FREQ_FUSE_MASK);
4726 } else { /* For pre-production hardware */
4727 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
4728 rpn = ((val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) &
4729 PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK);
4730 }
4731
4732 return rpn;
4733 }
4734
4735 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
4736 {
4737 u32 val, rp1;
4738
4739 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4740
4741 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
4742
4743 return rp1;
4744 }
4745
4746 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
4747 {
4748 u32 val, rp0;
4749
4750 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4751
4752 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
4753 /* Clamp to max */
4754 rp0 = min_t(u32, rp0, 0xea);
4755
4756 return rp0;
4757 }
4758
4759 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
4760 {
4761 u32 val, rpe;
4762
4763 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
4764 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
4765 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
4766 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
4767
4768 return rpe;
4769 }
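
/*
 * Illustrative only: RPe above is stitched together from two fuse
 * registers. With assumed extracted fields lo = 0x0a (the low 5 bits)
 * and hi = 0x3, the recombined value is (0x3 << 5) | 0x0a = 0x6a.
 */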
4770
4771 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
4772 {
4773 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
4774 }
4775
4776 /* Check that the pctx buffer wasn't moved under us. */
4777 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
4778 {
4779 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4780
4781 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
4782 dev_priv->vlv_pctx->stolen->start);
4783 }
4784
4785
4786 /* Check that the pcbr address is not empty. */
4787 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
4788 {
4789 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4790
4791 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
4792 }
4793
4794 static void cherryview_setup_pctx(struct drm_device *dev)
4795 {
4796 struct drm_i915_private *dev_priv = dev->dev_private;
4797 unsigned long pctx_paddr, paddr;
4798 struct i915_gtt *gtt = &dev_priv->gtt;
4799 u32 pcbr;
4800 int pctx_size = 32*1024;
4801
4802 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4803
4804 pcbr = I915_READ(VLV_PCBR);
4805 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
4806 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
4807 paddr = (dev_priv->mm.stolen_base +
4808 (gtt->stolen_size - pctx_size));
4809
4810 pctx_paddr = (paddr & (~4095));
4811 I915_WRITE(VLV_PCBR, pctx_paddr);
4812 }
4813
4814 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
4815 }
4816
4817 static void valleyview_setup_pctx(struct drm_device *dev)
4818 {
4819 struct drm_i915_private *dev_priv = dev->dev_private;
4820 struct drm_i915_gem_object *pctx;
4821 unsigned long pctx_paddr;
4822 u32 pcbr;
4823 int pctx_size = 24*1024;
4824
4825 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4826
4827 pcbr = I915_READ(VLV_PCBR);
4828 if (pcbr) {
4829 /* BIOS set it up already, grab the pre-alloc'd space */
4830 int pcbr_offset;
4831
4832 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
4833 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
4834 pcbr_offset,
4835 I915_GTT_OFFSET_NONE,
4836 pctx_size);
4837 goto out;
4838 }
4839
4840 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
4841
4842 /*
4843 * From the Gunit register HAS:
4844 * The Gfx driver is expected to program this register and ensure
4845 * proper allocation within Gfx stolen memory. For example, this
4846 * register should be programmed such that the PCBR range does not
4847 * overlap with other ranges, such as the frame buffer, protected
4848 * memory, or any other relevant ranges.
4849 */
4850 pctx = i915_gem_object_create_stolen(dev, pctx_size);
4851 if (!pctx) {
4852 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
4853 return;
4854 }
4855
4856 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
4857 I915_WRITE(VLV_PCBR, pctx_paddr);
4858
4859 out:
4860 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
4861 dev_priv->vlv_pctx = pctx;
4862 }
4863
4864 static void valleyview_cleanup_pctx(struct drm_device *dev)
4865 {
4866 struct drm_i915_private *dev_priv = dev->dev_private;
4867
4868 if (WARN_ON(!dev_priv->vlv_pctx))
4869 return;
4870
4871 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
4872 dev_priv->vlv_pctx = NULL;
4873 }
4874
4875 static void valleyview_init_gt_powersave(struct drm_device *dev)
4876 {
4877 struct drm_i915_private *dev_priv = dev->dev_private;
4878 u32 val;
4879
4880 valleyview_setup_pctx(dev);
4881
4882 mutex_lock(&dev_priv->rps.hw_lock);
4883
4884 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4885 switch ((val >> 6) & 3) {
4886 case 0:
4887 case 1:
4888 dev_priv->mem_freq = 800;
4889 break;
4890 case 2:
4891 dev_priv->mem_freq = 1066;
4892 break;
4893 case 3:
4894 dev_priv->mem_freq = 1333;
4895 break;
4896 }
4897 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
4898
4899 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
4900 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4901 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4902 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4903 dev_priv->rps.max_freq);
4904
4905 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
4906 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4907 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4908 dev_priv->rps.efficient_freq);
4909
4910 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
4911 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
4912 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4913 dev_priv->rps.rp1_freq);
4914
4915 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
4916 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4917 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4918 dev_priv->rps.min_freq);
4919
4920 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4921
4922 /* Preserve min/max settings in case of re-init */
4923 if (dev_priv->rps.max_freq_softlimit == 0)
4924 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4925
4926 if (dev_priv->rps.min_freq_softlimit == 0)
4927 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4928
4929 mutex_unlock(&dev_priv->rps.hw_lock);
4930 }
4931
4932 static void cherryview_init_gt_powersave(struct drm_device *dev)
4933 {
4934 struct drm_i915_private *dev_priv = dev->dev_private;
4935 u32 val;
4936
4937 cherryview_setup_pctx(dev);
4938
4939 mutex_lock(&dev_priv->rps.hw_lock);
4940
4941 mutex_lock(&dev_priv->dpio_lock);
4942 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
4943 mutex_unlock(&dev_priv->dpio_lock);
4944
4945 switch ((val >> 2) & 0x7) {
4946 case 0:
4947 case 1:
4948 dev_priv->rps.cz_freq = 200;
4949 dev_priv->mem_freq = 1600;
4950 break;
4951 case 2:
4952 dev_priv->rps.cz_freq = 267;
4953 dev_priv->mem_freq = 1600;
4954 break;
4955 case 3:
4956 dev_priv->rps.cz_freq = 333;
4957 dev_priv->mem_freq = 2000;
4958 break;
4959 case 4:
4960 dev_priv->rps.cz_freq = 320;
4961 dev_priv->mem_freq = 1600;
4962 break;
4963 case 5:
4964 dev_priv->rps.cz_freq = 400;
4965 dev_priv->mem_freq = 1600;
4966 break;
4967 }
4968 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
4969
4970 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
4971 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4972 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4973 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4974 dev_priv->rps.max_freq);
4975
4976 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
4977 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4978 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4979 dev_priv->rps.efficient_freq);
4980
4981 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
4982 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
4983 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4984 dev_priv->rps.rp1_freq);
4985
4986 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
4987 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4988 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4989 dev_priv->rps.min_freq);
4990
4991 WARN_ONCE((dev_priv->rps.max_freq |
4992 dev_priv->rps.efficient_freq |
4993 dev_priv->rps.rp1_freq |
4994 dev_priv->rps.min_freq) & 1,
4995 "Odd GPU freq values\n");
4996
4997 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
4998
4999 /* Preserve min/max settings in case of re-init */
5000 if (dev_priv->rps.max_freq_softlimit == 0)
5001 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5002
5003 if (dev_priv->rps.min_freq_softlimit == 0)
5004 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5005
5006 mutex_unlock(&dev_priv->rps.hw_lock);
5007 }
5008
5009 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
5010 {
5011 valleyview_cleanup_pctx(dev);
5012 }
5013
5014 static void cherryview_enable_rps(struct drm_device *dev)
5015 {
5016 struct drm_i915_private *dev_priv = dev->dev_private;
5017 struct intel_engine_cs *ring;
5018 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5019 int i;
5020
5021 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5022
5023 gtfifodbg = I915_READ(GTFIFODBG);
5024 if (gtfifodbg) {
5025 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5026 gtfifodbg);
5027 I915_WRITE(GTFIFODBG, gtfifodbg);
5028 }
5029
5030 cherryview_check_pctx(dev_priv);
5031
5032 /* 1a & 1b: Get forcewake during program sequence. Although the driver
5033 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5034 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5035
5036 /* Disable RC states. */
5037 I915_WRITE(GEN6_RC_CONTROL, 0);
5038
5039 /* 2a: Program RC6 thresholds.*/
5040 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5041 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5042 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5043
5044 for_each_ring(ring, dev_priv, i)
5045 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5046 I915_WRITE(GEN6_RC_SLEEP, 0);
5047
5048 /* TO threshold set to 500 us (0x186 * 1.28 us) */
5049 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
5050
5051 /* allows RC6 residency counter to work */
5052 I915_WRITE(VLV_COUNTER_CONTROL,
5053 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5054 VLV_MEDIA_RC6_COUNT_EN |
5055 VLV_RENDER_RC6_COUNT_EN));
5056
5057 /* For now we assume BIOS is allocating and populating the PCBR */
5058 pcbr = I915_READ(VLV_PCBR);
5059
5060 /* 3: Enable RC6 */
5061 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
5062 (pcbr >> VLV_PCBR_ADDR_SHIFT))
5063 rc6_mode = GEN7_RC_CTL_TO_MODE;
5064
5065 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5066
5067 /* 4 Program defaults and thresholds for RPS*/
5068 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5069 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5070 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5071 I915_WRITE(GEN6_RP_UP_EI, 66000);
5072 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5073
5074 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5075
5076 /* 5: Enable RPS */
5077 I915_WRITE(GEN6_RP_CONTROL,
5078 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5079 GEN6_RP_MEDIA_IS_GFX |
5080 GEN6_RP_ENABLE |
5081 GEN6_RP_UP_BUSY_AVG |
5082 GEN6_RP_DOWN_IDLE_AVG);
5083
5084 /* Setting Fixed Bias */
5085 val = VLV_OVERRIDE_EN |
5086 VLV_SOC_TDP_EN |
5087 CHV_BIAS_CPU_50_SOC_50;
5088 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5089
5090 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5091
5092 /* RPS code assumes GPLL is used */
5093 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5094
5095 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
5096 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5097
5098 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5099 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5100 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5101 dev_priv->rps.cur_freq);
5102
5103 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5104 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5105 dev_priv->rps.efficient_freq);
5106
5107 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5108
5109 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5110 }
5111
5112 static void valleyview_enable_rps(struct drm_device *dev)
5113 {
5114 struct drm_i915_private *dev_priv = dev->dev_private;
5115 struct intel_engine_cs *ring;
5116 u32 gtfifodbg, val, rc6_mode = 0;
5117 int i;
5118
5119 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5120
5121 valleyview_check_pctx(dev_priv);
5122
5123 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
5124 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5125 gtfifodbg);
5126 I915_WRITE(GTFIFODBG, gtfifodbg);
5127 }
5128
5129 /* On VLV, forcewake all wells; else re-direct to the regular path */
5130 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5131
5132 /* Disable RC states. */
5133 I915_WRITE(GEN6_RC_CONTROL, 0);
5134
5135 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5136 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5137 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5138 I915_WRITE(GEN6_RP_UP_EI, 66000);
5139 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5140
5141 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5142
5143 I915_WRITE(GEN6_RP_CONTROL,
5144 GEN6_RP_MEDIA_TURBO |
5145 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5146 GEN6_RP_MEDIA_IS_GFX |
5147 GEN6_RP_ENABLE |
5148 GEN6_RP_UP_BUSY_AVG |
5149 GEN6_RP_DOWN_IDLE_CONT);
5150
5151 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
5152 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5153 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5154
5155 for_each_ring(ring, dev_priv, i)
5156 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5157
5158 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
5159
5160 /* allows RC6 residency counter to work */
5161 I915_WRITE(VLV_COUNTER_CONTROL,
5162 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
5163 VLV_RENDER_RC0_COUNT_EN |
5164 VLV_MEDIA_RC6_COUNT_EN |
5165 VLV_RENDER_RC6_COUNT_EN));
5166
5167 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
5168 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
5169
5170 intel_print_rc6_info(dev, rc6_mode);
5171
5172 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5173
5174 /* Setting Fixed Bias */
5175 val = VLV_OVERRIDE_EN |
5176 VLV_SOC_TDP_EN |
5177 VLV_BIAS_CPU_125_SOC_875;
5178 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5179
5180 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5181
5182 /* RPS code assumes GPLL is used */
5183 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5184
5185 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
5186 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5187
5188 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5189 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5190 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5191 dev_priv->rps.cur_freq);
5192
5193 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5194 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5195 dev_priv->rps.efficient_freq);
5196
5197 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5198
5199 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5200 }
5201
5202 static unsigned long intel_pxfreq(u32 vidfreq)
5203 {
5204 unsigned long freq;
5205 int div = (vidfreq & 0x3f0000) >> 16;
5206 int post = (vidfreq & 0x3000) >> 12;
5207 int pre = (vidfreq & 0x7);
5208
5209 if (!pre)
5210 return 0;
5211
5212 freq = ((div * 133333) / ((1<<post) * pre));
5213
5214 return freq;
5215 }
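
/*
 * Illustrative only: decoding an assumed vidfreq value with the field
 * layout above. With div = 16, post = 1 and pre = 2:
 * freq = (16 * 133333) / ((1 << 1) * 2) = 533332, i.e. roughly 533MHz
 * if the units are kHz (an assumption based on the 133333 constant).
 */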
5216
5217 static const struct cparams {
5218 u16 i;
5219 u16 t;
5220 u16 m;
5221 u16 c;
5222 } cparams[] = {
5223 { 1, 1333, 301, 28664 },
5224 { 1, 1066, 294, 24460 },
5225 { 1, 800, 294, 25192 },
5226 { 0, 1333, 276, 27605 },
5227 { 0, 1066, 276, 27605 },
5228 { 0, 800, 231, 23784 },
5229 };
5230
5231 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
5232 {
5233 u64 total_count, diff, ret;
5234 u32 count1, count2, count3, m = 0, c = 0;
5235 unsigned long now = jiffies_to_msecs(jiffies), diff1;
5236 int i;
5237
5238 assert_spin_locked(&mchdev_lock);
5239
5240 diff1 = now - dev_priv->ips.last_time1;
5241
5242 /* Prevent division-by-zero if we are asking too fast.
5243 * Also, we don't get interesting results if we are polling
5244 * faster than once in 10ms, so just return the saved value
5245 * in such cases.
5246 */
5247 if (diff1 <= 10)
5248 return dev_priv->ips.chipset_power;
5249
5250 count1 = I915_READ(DMIEC);
5251 count2 = I915_READ(DDREC);
5252 count3 = I915_READ(CSIEC);
5253
5254 total_count = count1 + count2 + count3;
5255
5256 /* FIXME: handle per-counter overflow */
5257 if (total_count < dev_priv->ips.last_count1) {
5258 diff = ~0UL - dev_priv->ips.last_count1;
5259 diff += total_count;
5260 } else {
5261 diff = total_count - dev_priv->ips.last_count1;
5262 }
5263
5264 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
5265 if (cparams[i].i == dev_priv->ips.c_m &&
5266 cparams[i].t == dev_priv->ips.r_t) {
5267 m = cparams[i].m;
5268 c = cparams[i].c;
5269 break;
5270 }
5271 }
5272
5273 diff = div_u64(diff, diff1);
5274 ret = ((m * diff) + c);
5275 ret = div_u64(ret, 10);
5276
5277 dev_priv->ips.last_count1 = total_count;
5278 dev_priv->ips.last_time1 = now;
5279
5280 dev_priv->ips.chipset_power = ret;
5281
5282 return ret;
5283 }
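
/*
 * Illustrative only: a worked example of the linear model above. Taking
 * the cparams row { 1, 1333, 301, 28664 } (m = 301, c = 28664) and
 * assuming the energy counters advanced by 1000 over diff1 = 100ms,
 * diff = 10 counts/ms, so ret = (301 * 10 + 28664) / 10 = 3167.
 */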
5284
5285 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5286 {
5287 struct drm_device *dev = dev_priv->dev;
5288 unsigned long val;
5289
5290 if (INTEL_INFO(dev)->gen != 5)
5291 return 0;
5292
5293 spin_lock_irq(&mchdev_lock);
5294
5295 val = __i915_chipset_val(dev_priv);
5296
5297 spin_unlock_irq(&mchdev_lock);
5298
5299 return val;
5300 }
5301
5302 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
5303 {
5304 unsigned long m, x, b;
5305 u32 tsfs;
5306
5307 tsfs = I915_READ(TSFS);
5308
5309 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
5310 x = I915_READ8(TR1);
5311
5312 b = tsfs & TSFS_INTR_MASK;
5313
5314 return ((m * x) / 127) - b;
5315 }
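
/*
 * Illustrative only: with assumed register-derived values m = 100,
 * x = 90 and b = 10, the formula above yields
 * ((100 * 90) / 127) - 10 = 70 - 10 = 60.
 */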
5316
5317 static int _pxvid_to_vd(u8 pxvid)
5318 {
5319 if (pxvid == 0)
5320 return 0;
5321
5322 if (pxvid >= 8 && pxvid < 31)
5323 pxvid = 31;
5324
5325 return (pxvid + 2) * 125;
5326 }
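
/*
 * Illustrative only: pxvid 0x20 maps to (0x20 + 2) * 125 = 4250, while
 * any pxvid in the clamped 8..30 range maps to (31 + 2) * 125 = 4125,
 * in the fixed-point units consumed by pvid_to_extvid() below.
 */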
5327
5328 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
5329 {
5330 struct drm_device *dev = dev_priv->dev;
5331 const int vd = _pxvid_to_vd(pxvid);
5332 const int vm = vd - 1125;
5333
5334 if (INTEL_INFO(dev)->is_mobile)
5335 return vm > 0 ? vm : 0;
5336
5337 return vd;
5338 }
5339
5340 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
5341 {
5342 u64 now, diff, diffms;
5343 u32 count;
5344
5345 assert_spin_locked(&mchdev_lock);
5346
5347 now = ktime_get_raw_ns();
5348 diffms = now - dev_priv->ips.last_time2;
5349 do_div(diffms, NSEC_PER_MSEC);
5350
5351 /* Don't divide by 0 */
5352 if (!diffms)
5353 return;
5354
5355 count = I915_READ(GFXEC);
5356
5357 if (count < dev_priv->ips.last_count2) {
5358 diff = ~0UL - dev_priv->ips.last_count2;
5359 diff += count;
5360 } else {
5361 diff = count - dev_priv->ips.last_count2;
5362 }
5363
5364 dev_priv->ips.last_count2 = count;
5365 dev_priv->ips.last_time2 = now;
5366
5367 /* More magic constants... */
5368 diff = diff * 1181;
5369 diff = div_u64(diff, diffms * 10);
5370 dev_priv->ips.gfx_power = diff;
5371 }
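/*
 * Worked example (illustrative): a GFXEC delta of 10000 counts over
 * diffms = 100 ms gives gfx_power = (10000 * 1181) / (100 * 10) =
 * 11810, in the same internal units as the chipset value above; 1181
 * is another of the empirically derived IPS constants.
 */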
5372
5373 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5374 {
5375 struct drm_device *dev = dev_priv->dev;
5376
5377 if (INTEL_INFO(dev)->gen != 5)
5378 return;
5379
5380 spin_lock_irq(&mchdev_lock);
5381
5382 __i915_update_gfx_val(dev_priv);
5383
5384 spin_unlock_irq(&mchdev_lock);
5385 }
5386
5387 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
5388 {
5389 unsigned long t, corr, state1, corr2, state2;
5390 u32 pxvid, ext_v;
5391
5392 assert_spin_locked(&mchdev_lock);
5393
5394 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
5395 pxvid = (pxvid >> 24) & 0x7f;
5396 ext_v = pvid_to_extvid(dev_priv, pxvid);
5397
5398 state1 = ext_v;
5399
5400 t = i915_mch_val(dev_priv);
5401
5402 /* Revel in the empirically derived constants */
5403
5404 /* Correction factor in 1/100000 units */
5405 if (t > 80)
5406 corr = ((t * 2349) + 135940);
5407 else if (t >= 50)
5408 corr = ((t * 964) + 29317);
5409 else /* < 50 */
5410 corr = ((t * 301) + 1004);
5411
5412 corr = corr * ((150142 * state1) / 10000 - 78642);
5413 corr /= 100000;
5414 corr2 = (corr * dev_priv->ips.corr);
5415
5416 state2 = (corr2 * state1) / 10000;
5417 state2 /= 100; /* convert to mW */
5418
5419 __i915_update_gfx_val(dev_priv);
5420
5421 return dev_priv->ips.gfx_power + state2;
5422 }
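/*
 * Worked example (illustrative): at t = 60 the middle branch applies,
 * so corr = 964 * 60 + 29317 = 87157 (in the 1/100000 units noted
 * above). That factor is then scaled by the voltage-derived state1
 * term and the fused ips.corr calibration before being reduced to mW.
 */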
5423
5424 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5425 {
5426 struct drm_device *dev = dev_priv->dev;
5427 unsigned long val;
5428
5429 if (INTEL_INFO(dev)->gen != 5)
5430 return 0;
5431
5432 spin_lock_irq(&mchdev_lock);
5433
5434 val = __i915_gfx_val(dev_priv);
5435
5436 spin_unlock_irq(&mchdev_lock);
5437
5438 return val;
5439 }
5440
5441 /**
5442 * i915_read_mch_val - return value for IPS use
5443 *
5444 * Calculate and return a value for the IPS driver to use when deciding whether
5445 * we have thermal and power headroom to increase CPU or GPU power budget.
5446 */
5447 unsigned long i915_read_mch_val(void)
5448 {
5449 struct drm_i915_private *dev_priv;
5450 unsigned long chipset_val, graphics_val, ret = 0;
5451
5452 spin_lock_irq(&mchdev_lock);
5453 if (!i915_mch_dev)
5454 goto out_unlock;
5455 dev_priv = i915_mch_dev;
5456
5457 chipset_val = __i915_chipset_val(dev_priv);
5458 graphics_val = __i915_gfx_val(dev_priv);
5459
5460 ret = chipset_val + graphics_val;
5461
5462 out_unlock:
5463 spin_unlock_irq(&mchdev_lock);
5464
5465 return ret;
5466 }
5467 EXPORT_SYMBOL_GPL(i915_read_mch_val);
5468
5469 /**
5470 * i915_gpu_raise - raise GPU frequency limit
5471 *
5472 * Raise the limit; IPS indicates we have thermal headroom.
5473 */
5474 bool i915_gpu_raise(void)
5475 {
5476 struct drm_i915_private *dev_priv;
5477 bool ret = true;
5478
5479 spin_lock_irq(&mchdev_lock);
5480 if (!i915_mch_dev) {
5481 ret = false;
5482 goto out_unlock;
5483 }
5484 dev_priv = i915_mch_dev;
5485
5486 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
5487 dev_priv->ips.max_delay--;
5488
5489 out_unlock:
5490 spin_unlock_irq(&mchdev_lock);
5491
5492 return ret;
5493 }
5494 EXPORT_SYMBOL_GPL(i915_gpu_raise);
5495
5496 /**
5497 * i915_gpu_lower - lower GPU frequency limit
5498 *
5499 * IPS indicates we're close to a thermal limit, so throttle back the GPU
5500 * frequency maximum.
5501 */
5502 bool i915_gpu_lower(void)
5503 {
5504 struct drm_i915_private *dev_priv;
5505 bool ret = true;
5506
5507 spin_lock_irq(&mchdev_lock);
5508 if (!i915_mch_dev) {
5509 ret = false;
5510 goto out_unlock;
5511 }
5512 dev_priv = i915_mch_dev;
5513
5514 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
5515 dev_priv->ips.max_delay++;
5516
5517 out_unlock:
5518 spin_unlock_irq(&mchdev_lock);
5519
5520 return ret;
5521 }
5522 EXPORT_SYMBOL_GPL(i915_gpu_lower);
5523
5524 /**
5525 * i915_gpu_busy - indicate GPU busyness to IPS
5526 *
5527 * Tell the IPS driver whether or not the GPU is busy.
5528 */
5529 bool i915_gpu_busy(void)
5530 {
5531 struct drm_i915_private *dev_priv;
5532 struct intel_engine_cs *ring;
5533 bool ret = false;
5534 int i;
5535
5536 spin_lock_irq(&mchdev_lock);
5537 if (!i915_mch_dev)
5538 goto out_unlock;
5539 dev_priv = i915_mch_dev;
5540
5541 for_each_ring(ring, dev_priv, i)
5542 ret |= !list_empty(&ring->request_list);
5543
5544 out_unlock:
5545 spin_unlock_irq(&mchdev_lock);
5546
5547 return ret;
5548 }
5549 EXPORT_SYMBOL_GPL(i915_gpu_busy);
5550
5551 /**
5552 * i915_gpu_turbo_disable - disable graphics turbo
5553 *
5554 * Disable graphics turbo by resetting the max frequency and setting the
5555 * current frequency to the default.
5556 */
5557 bool i915_gpu_turbo_disable(void)
5558 {
5559 struct drm_i915_private *dev_priv;
5560 bool ret = true;
5561
5562 spin_lock_irq(&mchdev_lock);
5563 if (!i915_mch_dev) {
5564 ret = false;
5565 goto out_unlock;
5566 }
5567 dev_priv = i915_mch_dev;
5568
5569 dev_priv->ips.max_delay = dev_priv->ips.fstart;
5570
5571 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
5572 ret = false;
5573
5574 out_unlock:
5575 spin_unlock_irq(&mchdev_lock);
5576
5577 return ret;
5578 }
5579 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
5580
5581 /**
5582 * Tells the intel_ips driver that the i915 driver is now loaded, if
5583 * IPS got loaded first.
5584 *
5585 * This awkward dance is so that neither module has to depend on the
5586 * other in order for IPS to do the appropriate communication of
5587 * GPU turbo limits to i915.
5588 */
5589 static void
5590 ips_ping_for_i915_load(void)
5591 {
5592 void (*link)(void);
5593
5594 link = symbol_get(ips_link_to_i915_driver);
5595 if (link) {
5596 link();
5597 symbol_put(ips_link_to_i915_driver);
5598 }
5599 }
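/*
 * The same symbol_get()/symbol_put() pattern works for any optional
 * cross-module call. A minimal generic sketch, with a hypothetical
 * symbol name purely for illustration:
 *
 *	void (*hook)(int) = symbol_get(other_driver_notify);
 *	if (hook) {
 *		hook(42);
 *		symbol_put(other_driver_notify);
 *	}
 *
 * If the exporting module is not loaded, symbol_get() returns NULL,
 * so no hard module dependency is created in either direction.
 */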
5600
5601 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
5602 {
5603 /* We only register the i915 ips part with intel-ips once everything is
5604 * set up, to avoid intel-ips sneaking in and reading bogus values. */
5605 spin_lock_irq(&mchdev_lock);
5606 i915_mch_dev = dev_priv;
5607 spin_unlock_irq(&mchdev_lock);
5608
5609 ips_ping_for_i915_load();
5610 }
5611
5612 void intel_gpu_ips_teardown(void)
5613 {
5614 spin_lock_irq(&mchdev_lock);
5615 i915_mch_dev = NULL;
5616 spin_unlock_irq(&mchdev_lock);
5617 }
5618
5619 static void intel_init_emon(struct drm_device *dev)
5620 {
5621 struct drm_i915_private *dev_priv = dev->dev_private;
5622 u32 lcfuse;
5623 u8 pxw[16];
5624 int i;
5625
5626 /* Disable the energy counters while we program them */
5627 I915_WRITE(ECR, 0);
5628 POSTING_READ(ECR);
5629
5630 /* Program energy weights for various events */
5631 I915_WRITE(SDEW, 0x15040d00);
5632 I915_WRITE(CSIEW0, 0x007f0000);
5633 I915_WRITE(CSIEW1, 0x1e220004);
5634 I915_WRITE(CSIEW2, 0x04000004);
5635
5636 for (i = 0; i < 5; i++)
5637 I915_WRITE(PEW + (i * 4), 0);
5638 for (i = 0; i < 3; i++)
5639 I915_WRITE(DEW + (i * 4), 0);
5640
5641 /* Program P-state weights to account for frequency power adjustment */
5642 for (i = 0; i < 16; i++) {
5643 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
5644 unsigned long freq = intel_pxfreq(pxvidfreq);
5645 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
5646 PXVFREQ_PX_SHIFT;
5647 unsigned long val;
5648
5649 val = vid * vid;
5650 val *= (freq / 1000);
5651 val *= 255;
5652 val /= (127*127*900);
5653 if (val > 0xff)
5654 DRM_ERROR("bad pxval: %ld\n", val);
5655 pxw[i] = val;
5656 }
5657 /* Render standby states get 0 weight */
5658 pxw[14] = 0;
5659 pxw[15] = 0;
5660
5661 for (i = 0; i < 4; i++) {
5662 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
5663 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
5664 I915_WRITE(PXW + (i * 4), val);
5665 }
5666
5667 /* Adjust magic regs to magic values (more experimental results) */
5668 I915_WRITE(OGW0, 0);
5669 I915_WRITE(OGW1, 0);
5670 I915_WRITE(EG0, 0x00007f00);
5671 I915_WRITE(EG1, 0x0000000e);
5672 I915_WRITE(EG2, 0x000e0000);
5673 I915_WRITE(EG3, 0x68000300);
5674 I915_WRITE(EG4, 0x42000000);
5675 I915_WRITE(EG5, 0x00140031);
5676 I915_WRITE(EG6, 0);
5677 I915_WRITE(EG7, 0);
5678
5679 for (i = 0; i < 8; i++)
5680 I915_WRITE(PXWL + (i * 4), 0);
5681
5682 /* Enable PMON + select events */
5683 I915_WRITE(ECR, 0x80000019);
5684
5685 lcfuse = I915_READ(LCFUSE02);
5686
5687 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
5688 }
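/*
 * Worked example (illustrative): byte weights pxw[0..3] = 0x10, 0x20,
 * 0x30, 0x40 pack into the first PXW register as 0x10203040, i.e. the
 * lowest-indexed P-state weight lands in the most significant byte.
 */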
5689
5690 void intel_init_gt_powersave(struct drm_device *dev)
5691 {
5692 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
5693
5694 if (IS_CHERRYVIEW(dev))
5695 cherryview_init_gt_powersave(dev);
5696 else if (IS_VALLEYVIEW(dev))
5697 valleyview_init_gt_powersave(dev);
5698 }
5699
5700 void intel_cleanup_gt_powersave(struct drm_device *dev)
5701 {
5702 if (IS_CHERRYVIEW(dev))
5703 return;
5704 else if (IS_VALLEYVIEW(dev))
5705 valleyview_cleanup_gt_powersave(dev);
5706 }
5707
5708 static void gen6_suspend_rps(struct drm_device *dev)
5709 {
5710 struct drm_i915_private *dev_priv = dev->dev_private;
5711
5712 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
5713
5714 gen6_disable_rps_interrupts(dev);
5715 }
5716
5717 /**
5718 * intel_suspend_gt_powersave - suspend PM work and helper threads
5719 * @dev: drm device
5720 *
5721 * We don't want to disable RC6 or other features here, we just want
5722 * to make sure any work we've queued has finished and won't bother
5723 * us while we're suspended.
5724 */
5725 void intel_suspend_gt_powersave(struct drm_device *dev)
5726 {
5727 struct drm_i915_private *dev_priv = dev->dev_private;
5728
5729 if (INTEL_INFO(dev)->gen < 6)
5730 return;
5731
5732 gen6_suspend_rps(dev);
5733
5734 /* Force GPU to min freq during suspend */
5735 gen6_rps_idle(dev_priv);
5736 }
5737
5738 void intel_disable_gt_powersave(struct drm_device *dev)
5739 {
5740 struct drm_i915_private *dev_priv = dev->dev_private;
5741
5742 if (IS_IRONLAKE_M(dev)) {
5743 ironlake_disable_drps(dev);
5744 } else if (INTEL_INFO(dev)->gen >= 6) {
5745 intel_suspend_gt_powersave(dev);
5746
5747 mutex_lock(&dev_priv->rps.hw_lock);
5748 if (INTEL_INFO(dev)->gen >= 9)
5749 gen9_disable_rps(dev);
5750 else if (IS_CHERRYVIEW(dev))
5751 cherryview_disable_rps(dev);
5752 else if (IS_VALLEYVIEW(dev))
5753 valleyview_disable_rps(dev);
5754 else
5755 gen6_disable_rps(dev);
5756
5757 dev_priv->rps.enabled = false;
5758 mutex_unlock(&dev_priv->rps.hw_lock);
5759 }
5760 }
5761
5762 static void intel_gen6_powersave_work(struct work_struct *work)
5763 {
5764 struct drm_i915_private *dev_priv =
5765 container_of(work, struct drm_i915_private,
5766 rps.delayed_resume_work.work);
5767 struct drm_device *dev = dev_priv->dev;
5768
5769 mutex_lock(&dev_priv->rps.hw_lock);
5770
5771 gen6_reset_rps_interrupts(dev);
5772
5773 if (IS_CHERRYVIEW(dev)) {
5774 cherryview_enable_rps(dev);
5775 } else if (IS_VALLEYVIEW(dev)) {
5776 valleyview_enable_rps(dev);
5777 } else if (INTEL_INFO(dev)->gen >= 9) {
5778 gen9_enable_rc6(dev);
5779 gen9_enable_rps(dev);
5780 __gen6_update_ring_freq(dev);
5781 } else if (IS_BROADWELL(dev)) {
5782 gen8_enable_rps(dev);
5783 __gen6_update_ring_freq(dev);
5784 } else {
5785 gen6_enable_rps(dev);
5786 __gen6_update_ring_freq(dev);
5787 }
5788
5789 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
5790 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
5791
5792 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
5793 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
5794
5795 dev_priv->rps.enabled = true;
5796
5797 gen6_enable_rps_interrupts(dev);
5798
5799 mutex_unlock(&dev_priv->rps.hw_lock);
5800
5801 intel_runtime_pm_put(dev_priv);
5802 }
5803
5804 void intel_enable_gt_powersave(struct drm_device *dev)
5805 {
5806 struct drm_i915_private *dev_priv = dev->dev_private;
5807
5808 /* Powersaving is controlled by the host when inside a VM */
5809 if (intel_vgpu_active(dev))
5810 return;
5811
5812 if (IS_IRONLAKE_M(dev)) {
5813 mutex_lock(&dev->struct_mutex);
5814 ironlake_enable_drps(dev);
5815 intel_init_emon(dev);
5816 mutex_unlock(&dev->struct_mutex);
5817 } else if (INTEL_INFO(dev)->gen >= 6) {
5818 /*
5819 * PCU communication is slow and this doesn't need to be
5820 * done at any specific time, so do this out of our fast path
5821 * to make resume and init faster.
5822 *
5823 * We depend on the HW RC6 power context save/restore
5824 * mechanism when entering D3 through runtime PM suspend. So
5825 * disable RPM until RPS/RC6 is properly setup. We can only
5826 * get here via the driver load/system resume/runtime resume
5827 * paths, so the _noresume version is enough (and in case of
5828 * runtime resume it's necessary).
5829 */
5830 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
5831 round_jiffies_up_relative(HZ)))
5832 intel_runtime_pm_get_noresume(dev_priv);
5833 }
5834 }
5835
5836 void intel_reset_gt_powersave(struct drm_device *dev)
5837 {
5838 struct drm_i915_private *dev_priv = dev->dev_private;
5839
5840 if (INTEL_INFO(dev)->gen < 6)
5841 return;
5842
5843 gen6_suspend_rps(dev);
5844 dev_priv->rps.enabled = false;
5845 }
5846
5847 static void ibx_init_clock_gating(struct drm_device *dev)
5848 {
5849 struct drm_i915_private *dev_priv = dev->dev_private;
5850
5851 /*
5852 * On Ibex Peak and Cougar Point, we need to disable clock
5853 * gating for the panel power sequencer or it will fail to
5854 * start up when no ports are active.
5855 */
5856 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
5857 }
5858
5859 static void g4x_disable_trickle_feed(struct drm_device *dev)
5860 {
5861 struct drm_i915_private *dev_priv = dev->dev_private;
5862 int pipe;
5863
5864 for_each_pipe(dev_priv, pipe) {
5865 I915_WRITE(DSPCNTR(pipe),
5866 I915_READ(DSPCNTR(pipe)) |
5867 DISPPLANE_TRICKLE_FEED_DISABLE);
5868 intel_flush_primary_plane(dev_priv, pipe);
5869 }
5870 }
5871
5872 static void ilk_init_lp_watermarks(struct drm_device *dev)
5873 {
5874 struct drm_i915_private *dev_priv = dev->dev_private;
5875
5876 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
5877 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
5878 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
5879
5880 /*
5881 * Don't touch WM1S_LP_EN here.
5882 * Doing so could cause underruns.
5883 */
5884 }
5885
5886 static void ironlake_init_clock_gating(struct drm_device *dev)
5887 {
5888 struct drm_i915_private *dev_priv = dev->dev_private;
5889 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5890
5891 /*
5892 * Required for FBC
5893 * WaFbcDisableDpfcClockGating:ilk
5894 */
5895 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
5896 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
5897 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
5898
5899 I915_WRITE(PCH_3DCGDIS0,
5900 MARIUNIT_CLOCK_GATE_DISABLE |
5901 SVSMUNIT_CLOCK_GATE_DISABLE);
5902 I915_WRITE(PCH_3DCGDIS1,
5903 VFMUNIT_CLOCK_GATE_DISABLE);
5904
5905 /*
5906 * According to the spec the following bits should be set in
5907 * order to enable memory self-refresh
5908 * The bit 22/21 of 0x42004
5909 * The bit 5 of 0x42020
5910 * The bit 15 of 0x45000
5911 */
5912 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5913 (I915_READ(ILK_DISPLAY_CHICKEN2) |
5914 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
5915 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
5916 I915_WRITE(DISP_ARB_CTL,
5917 (I915_READ(DISP_ARB_CTL) |
5918 DISP_FBC_WM_DIS));
5919
5920 ilk_init_lp_watermarks(dev);
5921
5922 /*
5923 * Based on the document from hardware guys the following bits
5924 * should be set unconditionally in order to enable FBC.
5925 * The bit 22 of 0x42000
5926 * The bit 22 of 0x42004
5927 * The bit 7,8,9 of 0x42020.
5928 */
5929 if (IS_IRONLAKE_M(dev)) {
5930 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
5931 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5932 I915_READ(ILK_DISPLAY_CHICKEN1) |
5933 ILK_FBCQ_DIS);
5934 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5935 I915_READ(ILK_DISPLAY_CHICKEN2) |
5936 ILK_DPARB_GATE);
5937 }
5938
5939 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5940
5941 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5942 I915_READ(ILK_DISPLAY_CHICKEN2) |
5943 ILK_ELPIN_409_SELECT);
5944 I915_WRITE(_3D_CHICKEN2,
5945 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
5946 _3D_CHICKEN2_WM_READ_PIPELINED);
5947
5948 /* WaDisableRenderCachePipelinedFlush:ilk */
5949 I915_WRITE(CACHE_MODE_0,
5950 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5951
5952 /* WaDisable_RenderCache_OperationalFlush:ilk */
5953 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5954
5955 g4x_disable_trickle_feed(dev);
5956
5957 ibx_init_clock_gating(dev);
5958 }
5959
5960 static void cpt_init_clock_gating(struct drm_device *dev)
5961 {
5962 struct drm_i915_private *dev_priv = dev->dev_private;
5963 int pipe;
5964 uint32_t val;
5965
5966 /*
5967 * On Ibex Peak and Cougar Point, we need to disable clock
5968 * gating for the panel power sequencer or it will fail to
5969 * start up when no ports are active.
5970 */
5971 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
5972 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
5973 PCH_CPUNIT_CLOCK_GATE_DISABLE);
5974 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
5975 DPLS_EDP_PPS_FIX_DIS);
5976 /* The following fixes a weird display corruption (a few pixels shifted
5977 * downward) seen only on the LVDS panels of some HP laptops with Ivy Bridge.
5978 */
5979 for_each_pipe(dev_priv, pipe) {
5980 val = I915_READ(TRANS_CHICKEN2(pipe));
5981 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
5982 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5983 if (dev_priv->vbt.fdi_rx_polarity_inverted)
5984 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5985 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
5986 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
5987 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
5988 I915_WRITE(TRANS_CHICKEN2(pipe), val);
5989 }
5990 /* WADP0ClockGatingDisable */
5991 for_each_pipe(dev_priv, pipe) {
5992 I915_WRITE(TRANS_CHICKEN1(pipe),
5993 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5994 }
5995 }
5996
5997 static void gen6_check_mch_setup(struct drm_device *dev)
5998 {
5999 struct drm_i915_private *dev_priv = dev->dev_private;
6000 uint32_t tmp;
6001
6002 tmp = I915_READ(MCH_SSKPD);
6003 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
6004 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
6005 tmp);
6006 }
6007
6008 static void gen6_init_clock_gating(struct drm_device *dev)
6009 {
6010 struct drm_i915_private *dev_priv = dev->dev_private;
6011 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6012
6013 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6014
6015 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6016 I915_READ(ILK_DISPLAY_CHICKEN2) |
6017 ILK_ELPIN_409_SELECT);
6018
6019 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
6020 I915_WRITE(_3D_CHICKEN,
6021 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
6022
6023 /* WaDisable_RenderCache_OperationalFlush:snb */
6024 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6025
6026 /*
6027 * BSpec recommends 8x4 when MSAA is used,
6028 * however in practice 16x4 seems fastest.
6029 *
6030 * Note that PS/WM thread counts depend on the WIZ hashing
6031 * disable bit, which we don't touch here, but it's good
6032 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6033 */
6034 I915_WRITE(GEN6_GT_MODE,
6035 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6036
6037 ilk_init_lp_watermarks(dev);
6038
6039 I915_WRITE(CACHE_MODE_0,
6040 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
6041
6042 I915_WRITE(GEN6_UCGCTL1,
6043 I915_READ(GEN6_UCGCTL1) |
6044 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
6045 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
6046
6047 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
6048 * gating disable must be set. Failure to set it results in
6049 * flickering pixels due to Z write ordering failures after
6050 * some amount of runtime in the Mesa "fire" demo, and Unigine
6051 * Sanctuary and Tropics, and apparently anything else with
6052 * alpha test or pixel discard.
6053 *
6054 * According to the spec, bit 11 (RCCUNIT) must also be set,
6055 * but we have not debugged actual testcases to verify it.
6056 *
6057 * WaDisableRCCUnitClockGating:snb
6058 * WaDisableRCPBUnitClockGating:snb
6059 */
6060 I915_WRITE(GEN6_UCGCTL2,
6061 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
6062 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
6063
6064 /* WaStripsFansDisableFastClipPerformanceFix:snb */
6065 I915_WRITE(_3D_CHICKEN3,
6066 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
6067
6068 /*
6069 * Bspec says:
6070 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
6071 * 3DSTATE_SF number of SF output attributes is more than 16."
6072 */
6073 I915_WRITE(_3D_CHICKEN3,
6074 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
6075
6076 /*
6077 * According to the spec the following bits should be
6078 * set in order to enable memory self-refresh and fbc:
6079 * The bit21 and bit22 of 0x42000
6080 * The bit21 and bit22 of 0x42004
6081 * The bit5 and bit7 of 0x42020
6082 * The bit14 of 0x70180
6083 * The bit14 of 0x71180
6084 *
6085 * WaFbcAsynchFlipDisableFbcQueue:snb
6086 */
6087 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6088 I915_READ(ILK_DISPLAY_CHICKEN1) |
6089 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
6090 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6091 I915_READ(ILK_DISPLAY_CHICKEN2) |
6092 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
6093 I915_WRITE(ILK_DSPCLK_GATE_D,
6094 I915_READ(ILK_DSPCLK_GATE_D) |
6095 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
6096 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
6097
6098 g4x_disable_trickle_feed(dev);
6099
6100 cpt_init_clock_gating(dev);
6101
6102 gen6_check_mch_setup(dev);
6103 }
6104
6105 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
6106 {
6107 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
6108
6109 /*
6110 * WaVSThreadDispatchOverride:ivb,vlv
6111 *
6112 * This actually overrides the dispatch
6113 * mode for all thread types.
6114 */
6115 reg &= ~GEN7_FF_SCHED_MASK;
6116 reg |= GEN7_FF_TS_SCHED_HW;
6117 reg |= GEN7_FF_VS_SCHED_HW;
6118 reg |= GEN7_FF_DS_SCHED_HW;
6119
6120 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
6121 }
6122
6123 static void lpt_init_clock_gating(struct drm_device *dev)
6124 {
6125 struct drm_i915_private *dev_priv = dev->dev_private;
6126
6127 /*
6128 * TODO: this bit should only be set while it is actually needed, and
6129 * cleared again afterwards in order to save power.
6130 */
6131 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
6132 I915_WRITE(SOUTH_DSPCLK_GATE_D,
6133 I915_READ(SOUTH_DSPCLK_GATE_D) |
6134 PCH_LP_PARTITION_LEVEL_DISABLE);
6135
6136 /* WADPOClockGatingDisable:hsw */
6137 I915_WRITE(_TRANSA_CHICKEN1,
6138 I915_READ(_TRANSA_CHICKEN1) |
6139 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6140 }
6141
6142 static void lpt_suspend_hw(struct drm_device *dev)
6143 {
6144 struct drm_i915_private *dev_priv = dev->dev_private;
6145
6146 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6147 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
6148
6149 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6150 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6151 }
6152 }
6153
6154 static void broadwell_init_clock_gating(struct drm_device *dev)
6155 {
6156 struct drm_i915_private *dev_priv = dev->dev_private;
6157 enum pipe pipe;
6158
6159 I915_WRITE(WM3_LP_ILK, 0);
6160 I915_WRITE(WM2_LP_ILK, 0);
6161 I915_WRITE(WM1_LP_ILK, 0);
6162
6163 /* WaSwitchSolVfFArbitrationPriority:bdw */
6164 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6165
6166 /* WaPsrDPAMaskVBlankInSRD:bdw */
6167 I915_WRITE(CHICKEN_PAR1_1,
6168 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
6169
6170 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
6171 for_each_pipe(dev_priv, pipe) {
6172 I915_WRITE(CHICKEN_PIPESL_1(pipe),
6173 I915_READ(CHICKEN_PIPESL_1(pipe)) |
6174 BDW_DPRS_MASK_VBLANK_SRD);
6175 }
6176
6177 /* WaVSRefCountFullforceMissDisable:bdw */
6178 /* WaDSRefCountFullforceMissDisable:bdw */
6179 I915_WRITE(GEN7_FF_THREAD_MODE,
6180 I915_READ(GEN7_FF_THREAD_MODE) &
6181 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
6182
6183 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6184 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
6185
6186 /* WaDisableSDEUnitClockGating:bdw */
6187 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6188 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6189
6190 lpt_init_clock_gating(dev);
6191 }
6192
6193 static void haswell_init_clock_gating(struct drm_device *dev)
6194 {
6195 struct drm_i915_private *dev_priv = dev->dev_private;
6196
6197 ilk_init_lp_watermarks(dev);
6198
6199 /* L3 caching of data atomics doesn't work -- disable it. */
6200 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
6201 I915_WRITE(HSW_ROW_CHICKEN3,
6202 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
6203
6204 /* This is required by WaCatErrorRejectionIssue:hsw */
6205 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6206 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6207 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6208
6209 /* WaVSRefCountFullforceMissDisable:hsw */
6210 I915_WRITE(GEN7_FF_THREAD_MODE,
6211 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
6212
6213 /* WaDisable_RenderCache_OperationalFlush:hsw */
6214 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6215
6216 /* enable HiZ Raw Stall Optimization */
6217 I915_WRITE(CACHE_MODE_0_GEN7,
6218 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6219
6220 /* WaDisable4x2SubspanOptimization:hsw */
6221 I915_WRITE(CACHE_MODE_1,
6222 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
6223
6224 /*
6225 * BSpec recommends 8x4 when MSAA is used,
6226 * however in practice 16x4 seems fastest.
6227 *
6228 * Note that PS/WM thread counts depend on the WIZ hashing
6229 * disable bit, which we don't touch here, but it's good
6230 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6231 */
6232 I915_WRITE(GEN7_GT_MODE,
6233 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6234
6235 /* WaSampleCChickenBitEnable:hsw */
6236 I915_WRITE(HALF_SLICE_CHICKEN3,
6237 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
6238
6239 /* WaSwitchSolVfFArbitrationPriority:hsw */
6240 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6241
6242 /* WaRsPkgCStateDisplayPMReq:hsw */
6243 I915_WRITE(CHICKEN_PAR1_1,
6244 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
6245
6246 lpt_init_clock_gating(dev);
6247 }
6248
6249 static void ivybridge_init_clock_gating(struct drm_device *dev)
6250 {
6251 struct drm_i915_private *dev_priv = dev->dev_private;
6252 uint32_t snpcr;
6253
6254 ilk_init_lp_watermarks(dev);
6255
6256 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
6257
6258 /* WaDisableEarlyCull:ivb */
6259 I915_WRITE(_3D_CHICKEN3,
6260 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
6261
6262 /* WaDisableBackToBackFlipFix:ivb */
6263 I915_WRITE(IVB_CHICKEN3,
6264 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
6265 CHICKEN3_DGMG_DONE_FIX_DISABLE);
6266
6267 /* WaDisablePSDDualDispatchEnable:ivb */
6268 if (IS_IVB_GT1(dev))
6269 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
6270 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
6271
6272 /* WaDisable_RenderCache_OperationalFlush:ivb */
6273 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6274
6275 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
6276 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
6277 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
6278
6279 /* WaApplyL3ControlAndL3ChickenMode:ivb */
6280 I915_WRITE(GEN7_L3CNTLREG1,
6281 GEN7_WA_FOR_GEN7_L3_CONTROL);
6282 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
6283 GEN7_WA_L3_CHICKEN_MODE);
6284 if (IS_IVB_GT1(dev))
6285 I915_WRITE(GEN7_ROW_CHICKEN2,
6286 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6287 else {
6288 /* must write both registers */
6289 I915_WRITE(GEN7_ROW_CHICKEN2,
6290 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6291 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
6292 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6293 }
6294
6295 /* WaForceL3Serialization:ivb */
6296 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
6297 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
6298
6299 /*
6300 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
6301 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
6302 */
6303 I915_WRITE(GEN6_UCGCTL2,
6304 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
6305
6306 /* This is required by WaCatErrorRejectionIssue:ivb */
6307 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6308 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6309 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6310
6311 g4x_disable_trickle_feed(dev);
6312
6313 gen7_setup_fixed_func_scheduler(dev_priv);
6314
6315 if (0) { /* causes HiZ corruption on ivb:gt1 */
6316 /* enable HiZ Raw Stall Optimization */
6317 I915_WRITE(CACHE_MODE_0_GEN7,
6318 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6319 }
6320
6321 /* WaDisable4x2SubspanOptimization:ivb */
6322 I915_WRITE(CACHE_MODE_1,
6323 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
6324
6325 /*
6326 * BSpec recommends 8x4 when MSAA is used,
6327 * however in practice 16x4 seems fastest.
6328 *
6329 * Note that PS/WM thread counts depend on the WIZ hashing
6330 * disable bit, which we don't touch here, but it's good
6331 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6332 */
6333 I915_WRITE(GEN7_GT_MODE,
6334 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6335
6336 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
6337 snpcr &= ~GEN6_MBC_SNPCR_MASK;
6338 snpcr |= GEN6_MBC_SNPCR_MED;
6339 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
6340
6341 if (!HAS_PCH_NOP(dev))
6342 cpt_init_clock_gating(dev);
6343
6344 gen6_check_mch_setup(dev);
6345 }
6346
6347 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
6348 {
6349 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
6350
6351 /*
6352 * Disable trickle feed and enable pnd deadline calculation
6353 */
6354 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
6355 I915_WRITE(CBR1_VLV, 0);
6356 }
6357
6358 static void valleyview_init_clock_gating(struct drm_device *dev)
6359 {
6360 struct drm_i915_private *dev_priv = dev->dev_private;
6361
6362 vlv_init_display_clock_gating(dev_priv);
6363
6364 /* WaDisableEarlyCull:vlv */
6365 I915_WRITE(_3D_CHICKEN3,
6366 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
6367
6368 /* WaDisableBackToBackFlipFix:vlv */
6369 I915_WRITE(IVB_CHICKEN3,
6370 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
6371 CHICKEN3_DGMG_DONE_FIX_DISABLE);
6372
6373 /* WaPsdDispatchEnable:vlv */
6374 /* WaDisablePSDDualDispatchEnable:vlv */
6375 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
6376 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
6377 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
6378
6379 /* WaDisable_RenderCache_OperationalFlush:vlv */
6380 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6381
6382 /* WaForceL3Serialization:vlv */
6383 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
6384 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
6385
6386 /* WaDisableDopClockGating:vlv */
6387 I915_WRITE(GEN7_ROW_CHICKEN2,
6388 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6389
6390 /* This is required by WaCatErrorRejectionIssue:vlv */
6391 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6392 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6393 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6394
6395 gen7_setup_fixed_func_scheduler(dev_priv);
6396
6397 /*
6398 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
6399 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
6400 */
6401 I915_WRITE(GEN6_UCGCTL2,
6402 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
6403
6404 /* WaDisableL3Bank2xClockGate:vlv
6405 * Disable L3 clock gating - MMIO 940c[25] = 1
6406 * Set bit 25 to disable L3_BANK_2x_CLK_GATING. */
6407 I915_WRITE(GEN7_UCGCTL4,
6408 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
6409
6410 /*
6411 * BSpec says this must be set, even though
6412 * WaDisable4x2SubspanOptimization isn't listed for VLV.
6413 */
6414 I915_WRITE(CACHE_MODE_1,
6415 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
6416
6417 /*
6418 * BSpec recommends 8x4 when MSAA is used,
6419 * however in practice 16x4 seems fastest.
6420 *
6421 * Note that PS/WM thread counts depend on the WIZ hashing
6422 * disable bit, which we don't touch here, but it's good
6423 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6424 */
6425 I915_WRITE(GEN7_GT_MODE,
6426 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6427
6428 /*
6429 * WaIncreaseL3CreditsForVLVB0:vlv
6430 * This is the hardware default actually.
6431 */
6432 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
6433
6434 /*
6435 * WaDisableVLVClockGating_VBIIssue:vlv
6436 * Disable clock gating on the GCFG unit to prevent a delay
6437 * in the reporting of vblank events.
6438 */
6439 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
6440 }
6441
6442 static void cherryview_init_clock_gating(struct drm_device *dev)
6443 {
6444 struct drm_i915_private *dev_priv = dev->dev_private;
6445
6446 vlv_init_display_clock_gating(dev_priv);
6447
6448 /* WaVSRefCountFullforceMissDisable:chv */
6449 /* WaDSRefCountFullforceMissDisable:chv */
6450 I915_WRITE(GEN7_FF_THREAD_MODE,
6451 I915_READ(GEN7_FF_THREAD_MODE) &
6452 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
6453
6454 /* WaDisableSemaphoreAndSyncFlipWait:chv */
6455 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6456 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
6457
6458 /* WaDisableCSUnitClockGating:chv */
6459 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
6460 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
6461
6462 /* WaDisableSDEUnitClockGating:chv */
6463 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6464 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6465 }
6466
6467 static void g4x_init_clock_gating(struct drm_device *dev)
6468 {
6469 struct drm_i915_private *dev_priv = dev->dev_private;
6470 uint32_t dspclk_gate;
6471
6472 I915_WRITE(RENCLK_GATE_D1, 0);
6473 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
6474 GS_UNIT_CLOCK_GATE_DISABLE |
6475 CL_UNIT_CLOCK_GATE_DISABLE);
6476 I915_WRITE(RAMCLK_GATE_D, 0);
6477 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
6478 OVRUNIT_CLOCK_GATE_DISABLE |
6479 OVCUNIT_CLOCK_GATE_DISABLE;
6480 if (IS_GM45(dev))
6481 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
6482 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
6483
6484 /* WaDisableRenderCachePipelinedFlush */
6485 I915_WRITE(CACHE_MODE_0,
6486 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
6487
6488 /* WaDisable_RenderCache_OperationalFlush:g4x */
6489 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6490
6491 g4x_disable_trickle_feed(dev);
6492 }
6493
6494 static void crestline_init_clock_gating(struct drm_device *dev)
6495 {
6496 struct drm_i915_private *dev_priv = dev->dev_private;
6497
6498 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
6499 I915_WRITE(RENCLK_GATE_D2, 0);
6500 I915_WRITE(DSPCLK_GATE_D, 0);
6501 I915_WRITE(RAMCLK_GATE_D, 0);
6502 I915_WRITE16(DEUC, 0);
6503 I915_WRITE(MI_ARB_STATE,
6504 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6505
6506 /* WaDisable_RenderCache_OperationalFlush:gen4 */
6507 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6508 }
6509
6510 static void broadwater_init_clock_gating(struct drm_device *dev)
6511 {
6512 struct drm_i915_private *dev_priv = dev->dev_private;
6513
6514 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
6515 I965_RCC_CLOCK_GATE_DISABLE |
6516 I965_RCPB_CLOCK_GATE_DISABLE |
6517 I965_ISC_CLOCK_GATE_DISABLE |
6518 I965_FBC_CLOCK_GATE_DISABLE);
6519 I915_WRITE(RENCLK_GATE_D2, 0);
6520 I915_WRITE(MI_ARB_STATE,
6521 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6522
6523 /* WaDisable_RenderCache_OperationalFlush:gen4 */
6524 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6525 }
6526
6527 static void gen3_init_clock_gating(struct drm_device *dev)
6528 {
6529 struct drm_i915_private *dev_priv = dev->dev_private;
6530 u32 dstate = I915_READ(D_STATE);
6531
6532 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
6533 DSTATE_DOT_CLOCK_GATING;
6534 I915_WRITE(D_STATE, dstate);
6535
6536 if (IS_PINEVIEW(dev))
6537 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
6538
6539 /* IIR "flip pending" means done if this bit is set */
6540 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
6541
6542 /* interrupts should cause a wake up from C3 */
6543 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
6544
6545 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
6546 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
6547
6548 I915_WRITE(MI_ARB_STATE,
6549 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
6550 }
6551
6552 static void i85x_init_clock_gating(struct drm_device *dev)
6553 {
6554 struct drm_i915_private *dev_priv = dev->dev_private;
6555
6556 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
6557
6558 /* interrupts should cause a wake up from C3 */
6559 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
6560 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
6561
6562 I915_WRITE(MEM_MODE,
6563 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
6564 }
6565
6566 static void i830_init_clock_gating(struct drm_device *dev)
6567 {
6568 struct drm_i915_private *dev_priv = dev->dev_private;
6569
6570 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
6571
6572 I915_WRITE(MEM_MODE,
6573 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
6574 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
6575 }
6576
6577 void intel_init_clock_gating(struct drm_device *dev)
6578 {
6579 struct drm_i915_private *dev_priv = dev->dev_private;
6580
6581 if (dev_priv->display.init_clock_gating)
6582 dev_priv->display.init_clock_gating(dev);
6583 }
6584
6585 void intel_suspend_hw(struct drm_device *dev)
6586 {
6587 if (HAS_PCH_LPT(dev))
6588 lpt_suspend_hw(dev);
6589 }
6590
6591 /* Set up chip-specific power management-related functions */
6592 void intel_init_pm(struct drm_device *dev)
6593 {
6594 struct drm_i915_private *dev_priv = dev->dev_private;
6595
6596 intel_fbc_init(dev_priv);
6597
6598 /* For cxsr (C-state self-refresh) */
6599 if (IS_PINEVIEW(dev))
6600 i915_pineview_get_mem_freq(dev);
6601 else if (IS_GEN5(dev))
6602 i915_ironlake_get_mem_freq(dev);
6603
6604 /* For FIFO watermark updates */
6605 if (INTEL_INFO(dev)->gen >= 9) {
6606 skl_setup_wm_latency(dev);
6607
6608 if (IS_BROXTON(dev))
6609 dev_priv->display.init_clock_gating =
6610 bxt_init_clock_gating;
6611 else if (IS_SKYLAKE(dev))
6612 dev_priv->display.init_clock_gating =
6613 skl_init_clock_gating;
6614 dev_priv->display.update_wm = skl_update_wm;
6615 dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
6616 } else if (HAS_PCH_SPLIT(dev)) {
6617 ilk_setup_wm_latency(dev);
6618
6619 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
6620 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
6621 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
6622 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
6623 dev_priv->display.update_wm = ilk_update_wm;
6624 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
6625 } else {
6626 DRM_DEBUG_KMS("Failed to read display plane latency. "
6627 "Disable CxSR\n");
6628 }
6629
6630 if (IS_GEN5(dev))
6631 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
6632 else if (IS_GEN6(dev))
6633 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
6634 else if (IS_IVYBRIDGE(dev))
6635 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
6636 else if (IS_HASWELL(dev))
6637 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
6638 else if (INTEL_INFO(dev)->gen == 8)
6639 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
6640 } else if (IS_CHERRYVIEW(dev)) {
6641 dev_priv->display.update_wm = valleyview_update_wm;
6642 dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
6643 dev_priv->display.init_clock_gating =
6644 cherryview_init_clock_gating;
6645 } else if (IS_VALLEYVIEW(dev)) {
6646 dev_priv->display.update_wm = valleyview_update_wm;
6647 dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
6648 dev_priv->display.init_clock_gating =
6649 valleyview_init_clock_gating;
6650 } else if (IS_PINEVIEW(dev)) {
6651 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
6652 dev_priv->is_ddr3,
6653 dev_priv->fsb_freq,
6654 dev_priv->mem_freq)) {
6655 DRM_INFO("failed to find known CxSR latency "
6656 "(found ddr%s fsb freq %d, mem freq %d), "
6657 "disabling CxSR\n",
6658 (dev_priv->is_ddr3 == 1) ? "3" : "2",
6659 dev_priv->fsb_freq, dev_priv->mem_freq);
6660 /* Disable CxSR and never update its watermark again */
6661 intel_set_memory_cxsr(dev_priv, false);
6662 dev_priv->display.update_wm = NULL;
6663 } else
6664 dev_priv->display.update_wm = pineview_update_wm;
6665 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6666 } else if (IS_G4X(dev)) {
6667 dev_priv->display.update_wm = g4x_update_wm;
6668 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
6669 } else if (IS_GEN4(dev)) {
6670 dev_priv->display.update_wm = i965_update_wm;
6671 if (IS_CRESTLINE(dev))
6672 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
6673 else if (IS_BROADWATER(dev))
6674 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
6675 } else if (IS_GEN3(dev)) {
6676 dev_priv->display.update_wm = i9xx_update_wm;
6677 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
6678 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6679 } else if (IS_GEN2(dev)) {
6680 if (INTEL_INFO(dev)->num_pipes == 1) {
6681 dev_priv->display.update_wm = i845_update_wm;
6682 dev_priv->display.get_fifo_size = i845_get_fifo_size;
6683 } else {
6684 dev_priv->display.update_wm = i9xx_update_wm;
6685 dev_priv->display.get_fifo_size = i830_get_fifo_size;
6686 }
6687
6688 if (IS_I85X(dev) || IS_I865G(dev))
6689 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
6690 else
6691 dev_priv->display.init_clock_gating = i830_init_clock_gating;
6692 } else {
6693 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
6694 }
6695 }
6696
6697 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
6698 {
6699 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6700
6701 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
6702 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
6703 return -EAGAIN;
6704 }
6705
6706 I915_WRITE(GEN6_PCODE_DATA, *val);
6707 I915_WRITE(GEN6_PCODE_DATA1, 0);
6708 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
6709
6710 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6711 500)) {
6712 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
6713 return -ETIMEDOUT;
6714 }
6715
6716 *val = I915_READ(GEN6_PCODE_DATA);
6717 I915_WRITE(GEN6_PCODE_DATA, 0);
6718
6719 return 0;
6720 }
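/*
 * Typical usage, with rps.hw_lock already held (a sketch; the mailbox
 * command shown is one real example, other commands follow the same
 * shape):
 *
 *	u32 val = 0;
 *	ret = sandybridge_pcode_read(dev_priv,
 *				     GEN6_PCODE_READ_MIN_FREQ_TABLE, &val);
 *	if (ret == 0)
 *		DRM_DEBUG_DRIVER("pcode replied 0x%08x\n", val);
 *
 * Note that *val is written to GEN6_PCODE_DATA before the request is
 * issued, so callers should initialize it even for reads.
 */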
6721
6722 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
6723 {
6724 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6725
6726 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
6727 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
6728 return -EAGAIN;
6729 }
6730
6731 I915_WRITE(GEN6_PCODE_DATA, val);
6732 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
6733
6734 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6735 500)) {
6736 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
6737 return -ETIMEDOUT;
6738 }
6739
6740 I915_WRITE(GEN6_PCODE_DATA, 0);
6741
6742 return 0;
6743 }
6744
6745 static int vlv_gpu_freq_div(unsigned int czclk_freq)
6746 {
6747 switch (czclk_freq) {
6748 case 200:
6749 return 10;
6750 case 267:
6751 return 12;
6752 case 320:
6753 case 333:
6754 return 16;
6755 case 400:
6756 return 20;
6757 default:
6758 return -1;
6759 }
6760 }
6761
6762 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
6763 {
6764 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
6765
6766 div = vlv_gpu_freq_div(czclk_freq);
6767 if (div < 0)
6768 return div;
6769
6770 return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
6771 }
6772
6773 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
6774 {
6775 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
6776
6777 mul = vlv_gpu_freq_div(czclk_freq);
6778 if (mul < 0)
6779 return mul;
6780
6781 return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
6782 }
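/*
 * Worked example (illustrative): with mem_freq = 1066 the CZ clock is
 * DIV_ROUND_CLOSEST(1066, 4) = 267, so the divider is 12. Opcode 0xc0
 * then decodes to DIV_ROUND_CLOSEST(267 * (0xc0 + 6 - 0xbd), 12) =
 * 200 MHz, and byt_freq_opcode(dev_priv, 200) encodes straight back:
 * DIV_ROUND_CLOSEST(12 * 200, 267) + 0xbd - 6 = 9 + 183 = 0xc0.
 */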
6783
6784 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
6785 {
6786 int div, czclk_freq = dev_priv->rps.cz_freq;
6787
6788 div = vlv_gpu_freq_div(czclk_freq);
6789 if (div < 0) /* check before halving: -1 / 2 truncates to 0 */
6790 return div;
6791 div /= 2;
6792 return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
6793 }
6794
6795 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
6796 {
6797 int mul, czclk_freq = dev_priv->rps.cz_freq;
6798
6799 mul = vlv_gpu_freq_div(czclk_freq);
6800 if (mul < 0) /* check before halving: -1 / 2 truncates to 0 */
6801 return mul;
6802 mul /= 2;
6803 /* CHV needs even values */
6804 return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
6805 }
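/*
 * Worked example (illustrative): with cz_freq = 320 the halved divider
 * is 16 / 2 = 8, so opcode 44 decodes to
 * DIV_ROUND_CLOSEST(320 * 44, 2 * 8) / 2 = 880 / 2 = 440 MHz, and
 * chv_freq_opcode(dev_priv, 440) returns
 * DIV_ROUND_CLOSEST(440 * 2 * 8, 320) * 2 = 22 * 2 = 44, which is
 * even, as the hardware requires.
 */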
6806
6807 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
6808 {
6809 if (IS_GEN9(dev_priv->dev))
6810 return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
6811 else if (IS_CHERRYVIEW(dev_priv->dev))
6812 return chv_gpu_freq(dev_priv, val);
6813 else if (IS_VALLEYVIEW(dev_priv->dev))
6814 return byt_gpu_freq(dev_priv, val);
6815 else
6816 return val * GT_FREQUENCY_MULTIPLIER;
6817 }
6818
6819 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
6820 {
6821 if (IS_GEN9(dev_priv->dev))
6822 return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
6823 else if (IS_CHERRYVIEW(dev_priv->dev))
6824 return chv_freq_opcode(dev_priv, val);
6825 else if (IS_VALLEYVIEW(dev_priv->dev))
6826 return byt_freq_opcode(dev_priv, val);
6827 else
6828 return val / GT_FREQUENCY_MULTIPLIER;
6829 }
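/*
 * Worked example (illustrative, assuming the values this tree uses for
 * GT_FREQUENCY_MULTIPLIER (50) and GEN9_FREQ_SCALER (3)): a gen9
 * opcode of 36 maps to 36 * 50 / 3 = 600 MHz, and intel_freq_opcode()
 * inverts it as 600 * 3 / 50 = 36. Pre-gen9 big-core platforms simply
 * use flat 50 MHz steps with no scaler.
 */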
6830
6831 struct request_boost {
6832 struct work_struct work;
6833 struct drm_i915_gem_request *rq;
6834 };
6835
6836 static void __intel_rps_boost_work(struct work_struct *work)
6837 {
6838 struct request_boost *boost = container_of(work, struct request_boost, work);
6839
6840 if (!i915_gem_request_completed(boost->rq, true))
6841 gen6_rps_boost(to_i915(boost->rq->ring->dev), NULL);
6842
6843 i915_gem_request_unreference__unlocked(boost->rq);
6844 kfree(boost);
6845 }
6846
6847 void intel_queue_rps_boost_for_request(struct drm_device *dev,
6848 struct drm_i915_gem_request *rq)
6849 {
6850 struct request_boost *boost;
6851
6852 if (rq == NULL || INTEL_INFO(dev)->gen < 6)
6853 return;
6854
6855 boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
6856 if (boost == NULL)
6857 return;
6858
6859 i915_gem_request_reference(rq);
6860 boost->rq = rq;
6861
6862 INIT_WORK(&boost->work, __intel_rps_boost_work);
6863 queue_work(to_i915(dev)->wq, &boost->work);
6864 }
6865
6866 void intel_pm_setup(struct drm_device *dev)
6867 {
6868 struct drm_i915_private *dev_priv = dev->dev_private;
6869
6870 mutex_init(&dev_priv->rps.hw_lock);
6871
6872 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
6873 intel_gen6_powersave_work);
6874 INIT_LIST_HEAD(&dev_priv->rps.clients);
6875
6876 dev_priv->pm.suspended = false;
6877 }