drm/i915: drop WaSetupGtModeTdRowDispatch:snb
drivers/gpu/drm/i915/intel_pm.c
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
#include <linux/pm_runtime.h>

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in the
 * latency required to enter and leave RC6 and in the voltage consumed by the
 * GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and the
 * one which brings the most power savings; deeper states save more power,
 * but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)

/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc module parameter.
 */

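/*
 * As a usage note, matching the parameter checks in intel_update_fbc()
 * further below: booting with i915.enable_fbc=1 requests FBC, =0 disables it
 * outright, and a negative value defers the decision to the per-chip
 * default.
 */
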
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

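/*
 * A sketch of the register protocol used below, inferred only from the write
 * sequence itself: GEN6_BLITTER_ECOSKPD appears to behave as a masked
 * register, where a low bit only latches if its companion bit (shifted up by
 * GEN6_BLITTER_LOCK_SHIFT) is set in the same write. Hence the dance of
 * first arming the mask bit, then writing the value bit, then disarming the
 * mask again before the posting read.
 */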
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. No impact of
	 * this parameter on other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}

static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}

void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_GEN8(dev))
		return;

	I915_WRITE(MSG_FBC_REND_STATE, value);
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}

static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}

static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	for_each_crtc(dev, tmp_crtc) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
	    to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
					      drm_format_plane_cpp(fb->pixel_format, 0))) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

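/*
 * Column legend, matching the field order consumed by
 * intel_get_cxsr_latency() and pineview_update_wm() below: is_desktop,
 * is_ddr3, fsb_freq, mem_freq, then the four self-refresh latencies
 * (display_sr, cursor_sr, display_hpll_disable, cursor_hpll_disable), each
 * of which is passed to intel_calculate_wm() as its latency_ns argument.
 */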
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

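/*
 * Illustrative example for the DSPARB split above, using a hypothetical
 * register value and assuming DSPARB_CSTART_SHIFT is 7: with DSPARB =
 * 0x303f, plane A gets 0x3f = 63 FIFO entries and plane B gets
 * ((0x303f >> 7) & 0x7f) - 63 = 96 - 63 = 33 entries.
 */
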
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Lets go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}

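/*
 * Worked example for the formula above, using made-up but representative
 * numbers: a 148500 kHz pixel clock at 4 bytes per pixel with a 5000 ns
 * latency needs (148500 / 1000) * 4 * 5000 / 1000 = 2960 bytes (integer
 * division) during the latency window, i.e. DIV_ROUND_UP(2960, 64) = 47
 * cachelines for a 64 byte cacheline. With a hypothetical 96 entry FIFO and
 * a guard size of 2, the watermark comes out at 96 - (47 + 2) = 47.
 */
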
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

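/*
 * Small-buffer method example for g4x_compute_wm0() (illustrative numbers
 * only): at a 148500 kHz clock and 4 bytes per pixel, a 5000 ns latency
 * drains ((148500 * 4 / 1000) * 5000) / 1000 = 2970 bytes, i.e.
 * DIV_ROUND_UP(2970, 64) = 47 cachelines for a 64 byte cacheline, so the
 * plane watermark becomes 47 plus the guard size (ignoring any TLB-miss
 * adjustment).
 */
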
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

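/*
 * Illustrative example for the drain latency computed below (hypothetical
 * numbers, and assuming DRAIN_LATENCY_PRECISION_64 has the numeric value
 * 64): a 148500 kHz clock at 4 bytes per pixel gives
 * DIV_ROUND_UP(148500, 1000) * 4 = 596 entries; 596 > 128 selects the
 * 64-precision multiplier, so drain_latency = (64 * 64 * 4) / 596 = 27.
 */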
static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
				      int pixel_size,
				      int *prec_mult,
				      int *drain_latency)
{
	int entries;
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;

	if (WARN(clock == 0, "Pixel clock is zero!\n"))
		return false;

	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
		return false;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
	*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
				       DRAIN_LATENCY_PRECISION_32;
	*drain_latency = (64 * (*prec_mult) * 4) / entries;

	if (*drain_latency > DRAIN_LATENCY_MASK)
		*drain_latency = DRAIN_LATENCY_MASK;

	return true;
}

/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */

static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pixel_size;
	int drain_latency;
	enum pipe pipe = intel_crtc->pipe;
	int plane_prec, prec_mult, plane_dl;

	plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
		   DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
		   (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));

	if (!intel_crtc_active(crtc)) {
		I915_WRITE(VLV_DDL(pipe), plane_dl);
		return;
	}

	/* Primary plane Drain Latency */
	pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
	if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
					   DDL_PLANE_PRECISION_64 :
					   DDL_PLANE_PRECISION_32;
		plane_dl |= plane_prec | drain_latency;
	}

	/* Cursor Drain Latency
	 * BPP is always 4 for cursor
	 */
	pixel_size = 4;

	/* Program cursor DL only if it is enabled */
	if (intel_crtc->cursor_base &&
	    vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
					   DDL_CURSOR_PRECISION_64 :
					   DDL_CURSOR_PRECISION_32;
		plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
	}

	I915_WRITE(VLV_DDL(pipe), plane_dl);
}

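/*
 * Exactly one bit set in the enabled-pipe mask means exactly one plane is
 * active: e.g. mask 0x2 (pipe B only) is a power of two, while mask 0x3
 * (pipes A and B) is not, so the single-pipe self-refresh paths below are
 * skipped.
 */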
#define single_plane_enabled(mask) is_power_of_2(mask)

static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void cherryview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, planec_wm;
	int cursora_wm, cursorb_wm, cursorc_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (g4x_compute_wm0(dev, PIPE_C,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planec_wm, &cursorc_wm))
		enabled |= 1 << PIPE_C;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
		      "SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      planec_wm, cursorc_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
	I915_WRITE(DSPFW9_CHV,
		   (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
					      DSPFW_CURSORC_MASK)) |
		   (planec_wm << DSPFW_PLANEC_SHIFT) |
		   (cursorc_wm << DSPFW_CURSORC_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void valleyview_update_sprite_wm(struct drm_plane *plane,
					struct drm_crtc *crtc,
					uint32_t sprite_width,
					uint32_t sprite_height,
					int pixel_size,
					bool enabled, bool scaled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_plane(plane)->pipe;
	int sprite = to_intel_plane(plane)->plane;
	int drain_latency;
	int plane_prec;
	int sprite_dl;
	int prec_mult;

	sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
		    (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));

	if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
						 &drain_latency)) {
		plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
					   DDL_SPRITE_PRECISION_64(sprite) :
					   DDL_SPRITE_PRECISION_32(sprite);
		sprite_dl |= plane_prec |
			     (drain_latency << DDL_SPRITE_SHIFT(sprite));
	}

	I915_WRITE(VLV_DDL(pipe), sprite_dl);
}

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << DSPFW_CURSORB_SHIFT) |
		   (8 << DSPFW_PLANEB_SHIFT) |
		   (8 << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
		   (8 << DSPFW_PLANEC_SHIFT_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

1693 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1694 {
1695 struct drm_device *dev = unused_crtc->dev;
1696 struct drm_i915_private *dev_priv = dev->dev_private;
1697 const struct intel_watermark_params *wm_info;
1698 uint32_t fwater_lo;
1699 uint32_t fwater_hi;
1700 int cwm, srwm = 1;
1701 int fifo_size;
1702 int planea_wm, planeb_wm;
1703 struct drm_crtc *crtc, *enabled = NULL;
1704
1705 if (IS_I945GM(dev))
1706 wm_info = &i945_wm_info;
1707 else if (!IS_GEN2(dev))
1708 wm_info = &i915_wm_info;
1709 else
1710 wm_info = &i830_a_wm_info;
1711
1712 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1713 crtc = intel_get_crtc_for_plane(dev, 0);
1714 if (intel_crtc_active(crtc)) {
1715 const struct drm_display_mode *adjusted_mode;
1716 int cpp = crtc->primary->fb->bits_per_pixel / 8;
1717 if (IS_GEN2(dev))
1718 cpp = 4;
1719
1720 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1721 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1722 wm_info, fifo_size, cpp,
1723 pessimal_latency_ns);
1724 enabled = crtc;
1725 } else {
1726 planea_wm = fifo_size - wm_info->guard_size;
1727 if (planea_wm > (long)wm_info->max_wm)
1728 planea_wm = wm_info->max_wm;
1729 }
1730
1731 if (IS_GEN2(dev))
1732 wm_info = &i830_bc_wm_info;
1733
1734 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1735 crtc = intel_get_crtc_for_plane(dev, 1);
1736 if (intel_crtc_active(crtc)) {
1737 const struct drm_display_mode *adjusted_mode;
1738 int cpp = crtc->primary->fb->bits_per_pixel / 8;
1739 if (IS_GEN2(dev))
1740 cpp = 4;
1741
1742 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1743 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1744 wm_info, fifo_size, cpp,
1745 pessimal_latency_ns);
1746 if (enabled == NULL)
1747 enabled = crtc;
1748 else
1749 enabled = NULL;
1750 } else {
1751 planeb_wm = fifo_size - wm_info->guard_size;
1752 if (planeb_wm > (long)wm_info->max_wm)
1753 planeb_wm = wm_info->max_wm;
1754 }
1755
1756 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1757
1758 if (IS_I915GM(dev) && enabled) {
1759 struct drm_i915_gem_object *obj;
1760
1761 obj = intel_fb_obj(enabled->primary->fb);
1762
1763 /* self-refresh seems busted with untiled */
1764 if (obj->tiling_mode == I915_TILING_NONE)
1765 enabled = NULL;
1766 }
1767
1768 /*
1769 * Overlay gets an aggressive default since video jitter is bad.
1770 */
1771 cwm = 2;
1772
1773 /* Play safe and disable self-refresh before adjusting watermarks. */
1774 intel_set_memory_cxsr(dev_priv, false);
1775
1776 	/* Calc SR entries for single-plane configs */
1777 if (HAS_FW_BLC(dev) && enabled) {
1778 /* self-refresh has much higher latency */
1779 static const int sr_latency_ns = 6000;
1780 const struct drm_display_mode *adjusted_mode =
1781 &to_intel_crtc(enabled)->config.adjusted_mode;
1782 int clock = adjusted_mode->crtc_clock;
1783 int htotal = adjusted_mode->crtc_htotal;
1784 int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
1785 int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
1786 unsigned long line_time_us;
1787 int entries;
1788
1789 line_time_us = max(htotal * 1000 / clock, 1);
1790
1791 /* Use ns/us then divide to preserve precision */
1792 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1793 pixel_size * hdisplay;
1794 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1795 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1796 srwm = wm_info->fifo_size - entries;
1797 if (srwm < 0)
1798 srwm = 1;
1799
1800 if (IS_I945G(dev) || IS_I945GM(dev))
1801 I915_WRITE(FW_BLC_SELF,
1802 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1803 else if (IS_I915GM(dev))
1804 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1805 }
1806
1807 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1808 planea_wm, planeb_wm, cwm, srwm);
1809
1810 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1811 fwater_hi = (cwm & 0x1f);
1812
1813 /* Set request length to 8 cachelines per fetch */
1814 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1815 fwater_hi = fwater_hi | (1 << 8);
1816
1817 I915_WRITE(FW_BLC, fwater_lo);
1818 I915_WRITE(FW_BLC2, fwater_hi);
1819
1820 if (enabled)
1821 intel_set_memory_cxsr(dev_priv, true);
1822 }
1823
1824 static void i845_update_wm(struct drm_crtc *unused_crtc)
1825 {
1826 struct drm_device *dev = unused_crtc->dev;
1827 struct drm_i915_private *dev_priv = dev->dev_private;
1828 struct drm_crtc *crtc;
1829 const struct drm_display_mode *adjusted_mode;
1830 uint32_t fwater_lo;
1831 int planea_wm;
1832
1833 crtc = single_enabled_crtc(dev);
1834 if (crtc == NULL)
1835 return;
1836
1837 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1838 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1839 &i845_wm_info,
1840 dev_priv->display.get_fifo_size(dev, 0),
1841 4, pessimal_latency_ns);
1842 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1843 fwater_lo |= (3<<8) | planea_wm;
1844
1845 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1846
1847 I915_WRITE(FW_BLC, fwater_lo);
1848 }
1849
1850 static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
1851 struct drm_crtc *crtc)
1852 {
1853 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1854 uint32_t pixel_rate;
1855
1856 pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
1857
1858 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
1859 * adjust the pixel_rate here. */
1860
1861 if (intel_crtc->config.pch_pfit.enabled) {
1862 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
1863 uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
1864
1865 pipe_w = intel_crtc->config.pipe_src_w;
1866 pipe_h = intel_crtc->config.pipe_src_h;
1867 pfit_w = (pfit_size >> 16) & 0xFFFF;
1868 pfit_h = pfit_size & 0xFFFF;
1869 if (pipe_w < pfit_w)
1870 pipe_w = pfit_w;
1871 if (pipe_h < pfit_h)
1872 pipe_h = pfit_h;
1873
1874 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
1875 pfit_w * pfit_h);
1876 }
1877
1878 return pixel_rate;
1879 }
1880
1881 /* latency must be in 0.1us units. */
1882 static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
1883 uint32_t latency)
1884 {
1885 uint64_t ret;
1886
1887 if (WARN(latency == 0, "Latency value missing\n"))
1888 return UINT_MAX;
1889
1890 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
1891 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
1892
1893 return ret;
1894 }
1895
1896 /* latency must be in 0.1us units. */
1897 static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
1898 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
1899 uint32_t latency)
1900 {
1901 uint32_t ret;
1902
1903 if (WARN(latency == 0, "Latency value missing\n"))
1904 return UINT_MAX;
1905
1906 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
1907 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
1908 ret = DIV_ROUND_UP(ret, 64) + 2;
1909 return ret;
1910 }
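
/*
 * Worked example (illustrative values, not from the original source):
 * for a 1920x1080@60 mode with pixel_rate = 148500 kHz, htotal = 2200,
 * 4 bytes per pixel and latency = 120 (12us in 0.1us units):
 *
 *   method1 = DIV_ROUND_UP_ULL(148500ULL * 4 * 120, 64 * 10000) + 2
 *           = DIV_ROUND_UP_ULL(71280000, 640000) + 2 = 112 + 2 = 114
 *
 *   method2: (120 * 148500) / (2200 * 10000) = 0 full lines, so
 *   ret = (0 + 1) * 1920 * 4 = 7680 and DIV_ROUND_UP(7680, 64) + 2 = 122
 *
 * ilk_compute_pri_wm() below then uses min(method1, method2) = 114
 * cachelines for LP levels.
 */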
1911
1912 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
1913 uint8_t bytes_per_pixel)
1914 {
1915 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
1916 }
1917
1918 struct ilk_pipe_wm_parameters {
1919 bool active;
1920 uint32_t pipe_htotal;
1921 uint32_t pixel_rate;
1922 struct intel_plane_wm_parameters pri;
1923 struct intel_plane_wm_parameters spr;
1924 struct intel_plane_wm_parameters cur;
1925 };
1926
1927 struct ilk_wm_maximums {
1928 uint16_t pri;
1929 uint16_t spr;
1930 uint16_t cur;
1931 uint16_t fbc;
1932 };
1933
1934 /* used in computing the new watermarks state */
1935 struct intel_wm_config {
1936 unsigned int num_pipes_active;
1937 bool sprites_enabled;
1938 bool sprites_scaled;
1939 };
1940
1941 /*
1942 * For both WM_PIPE and WM_LP.
1943 * mem_value must be in 0.1us units.
1944 */
1945 static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
1946 uint32_t mem_value,
1947 bool is_lp)
1948 {
1949 uint32_t method1, method2;
1950
1951 if (!params->active || !params->pri.enabled)
1952 return 0;
1953
1954 method1 = ilk_wm_method1(params->pixel_rate,
1955 params->pri.bytes_per_pixel,
1956 mem_value);
1957
1958 if (!is_lp)
1959 return method1;
1960
1961 method2 = ilk_wm_method2(params->pixel_rate,
1962 params->pipe_htotal,
1963 params->pri.horiz_pixels,
1964 params->pri.bytes_per_pixel,
1965 mem_value);
1966
1967 return min(method1, method2);
1968 }
1969
1970 /*
1971 * For both WM_PIPE and WM_LP.
1972 * mem_value must be in 0.1us units.
1973 */
1974 static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
1975 uint32_t mem_value)
1976 {
1977 uint32_t method1, method2;
1978
1979 if (!params->active || !params->spr.enabled)
1980 return 0;
1981
1982 method1 = ilk_wm_method1(params->pixel_rate,
1983 params->spr.bytes_per_pixel,
1984 mem_value);
1985 method2 = ilk_wm_method2(params->pixel_rate,
1986 params->pipe_htotal,
1987 params->spr.horiz_pixels,
1988 params->spr.bytes_per_pixel,
1989 mem_value);
1990 return min(method1, method2);
1991 }
1992
1993 /*
1994 * For both WM_PIPE and WM_LP.
1995 * mem_value must be in 0.1us units.
1996 */
1997 static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
1998 uint32_t mem_value)
1999 {
2000 if (!params->active || !params->cur.enabled)
2001 return 0;
2002
2003 return ilk_wm_method2(params->pixel_rate,
2004 params->pipe_htotal,
2005 params->cur.horiz_pixels,
2006 params->cur.bytes_per_pixel,
2007 mem_value);
2008 }
2009
2010 /* Only for WM_LP. */
2011 static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
2012 uint32_t pri_val)
2013 {
2014 if (!params->active || !params->pri.enabled)
2015 return 0;
2016
2017 return ilk_wm_fbc(pri_val,
2018 params->pri.horiz_pixels,
2019 params->pri.bytes_per_pixel);
2020 }
2021
2022 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
2023 {
2024 if (INTEL_INFO(dev)->gen >= 8)
2025 return 3072;
2026 else if (INTEL_INFO(dev)->gen >= 7)
2027 return 768;
2028 else
2029 return 512;
2030 }
2031
2032 static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
2033 int level, bool is_sprite)
2034 {
2035 if (INTEL_INFO(dev)->gen >= 8)
2036 /* BDW primary/sprite plane watermarks */
2037 return level == 0 ? 255 : 2047;
2038 else if (INTEL_INFO(dev)->gen >= 7)
2039 /* IVB/HSW primary/sprite plane watermarks */
2040 return level == 0 ? 127 : 1023;
2041 else if (!is_sprite)
2042 /* ILK/SNB primary plane watermarks */
2043 return level == 0 ? 127 : 511;
2044 else
2045 /* ILK/SNB sprite plane watermarks */
2046 return level == 0 ? 63 : 255;
2047 }
2048
2049 static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
2050 int level)
2051 {
2052 if (INTEL_INFO(dev)->gen >= 7)
2053 return level == 0 ? 63 : 255;
2054 else
2055 return level == 0 ? 31 : 63;
2056 }
2057
2058 static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
2059 {
2060 if (INTEL_INFO(dev)->gen >= 8)
2061 return 31;
2062 else
2063 return 15;
2064 }
2065
2066 /* Calculate the maximum primary/sprite plane watermark */
2067 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2068 int level,
2069 const struct intel_wm_config *config,
2070 enum intel_ddb_partitioning ddb_partitioning,
2071 bool is_sprite)
2072 {
2073 unsigned int fifo_size = ilk_display_fifo_size(dev);
2074
2075 /* if sprites aren't enabled, sprites get nothing */
2076 if (is_sprite && !config->sprites_enabled)
2077 return 0;
2078
2079 /* HSW allows LP1+ watermarks even with multiple pipes */
2080 if (level == 0 || config->num_pipes_active > 1) {
2081 fifo_size /= INTEL_INFO(dev)->num_pipes;
2082
2083 /*
2084 * For some reason the non self refresh
2085 * FIFO size is only half of the self
2086 * refresh FIFO size on ILK/SNB.
2087 */
2088 if (INTEL_INFO(dev)->gen <= 6)
2089 fifo_size /= 2;
2090 }
2091
2092 if (config->sprites_enabled) {
2093 /* level 0 is always calculated with 1:1 split */
2094 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2095 if (is_sprite)
2096 fifo_size *= 5;
2097 fifo_size /= 6;
2098 } else {
2099 fifo_size /= 2;
2100 }
2101 }
2102
2103 /* clamp to max that the registers can hold */
2104 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
2105 }
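
/*
 * Worked example (illustrative, not from the original source): on
 * IVB/HSW (gen7) the display FIFO holds 768 cachelines. For an LP1+
 * level with a single active pipe and sprites enabled, the 5/6 DDB
 * split gives the sprite 768 * 5 / 6 = 640 entries and the primary
 * 768 / 6 = 128, while the default 1:1 split gives each 768 / 2 = 384.
 * All of these fit under the gen7 LP register maximum of 1023, so no
 * clamping occurs.
 */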
2106
2107 /* Calculate the maximum cursor plane watermark */
2108 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2109 int level,
2110 const struct intel_wm_config *config)
2111 {
2112 /* HSW LP1+ watermarks w/ multiple pipes */
2113 if (level > 0 && config->num_pipes_active > 1)
2114 return 64;
2115
2116 /* otherwise just report max that registers can hold */
2117 return ilk_cursor_wm_reg_max(dev, level);
2118 }
2119
2120 static void ilk_compute_wm_maximums(const struct drm_device *dev,
2121 int level,
2122 const struct intel_wm_config *config,
2123 enum intel_ddb_partitioning ddb_partitioning,
2124 struct ilk_wm_maximums *max)
2125 {
2126 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2127 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2128 max->cur = ilk_cursor_wm_max(dev, level, config);
2129 max->fbc = ilk_fbc_wm_reg_max(dev);
2130 }
2131
2132 static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
2133 int level,
2134 struct ilk_wm_maximums *max)
2135 {
2136 max->pri = ilk_plane_wm_reg_max(dev, level, false);
2137 max->spr = ilk_plane_wm_reg_max(dev, level, true);
2138 max->cur = ilk_cursor_wm_reg_max(dev, level);
2139 max->fbc = ilk_fbc_wm_reg_max(dev);
2140 }
2141
2142 static bool ilk_validate_wm_level(int level,
2143 const struct ilk_wm_maximums *max,
2144 struct intel_wm_level *result)
2145 {
2146 bool ret;
2147
2148 /* already determined to be invalid? */
2149 if (!result->enable)
2150 return false;
2151
2152 result->enable = result->pri_val <= max->pri &&
2153 result->spr_val <= max->spr &&
2154 result->cur_val <= max->cur;
2155
2156 ret = result->enable;
2157
2158 /*
2159 * HACK until we can pre-compute everything,
2160 * and thus fail gracefully if LP0 watermarks
2161 * are exceeded...
2162 */
2163 if (level == 0 && !result->enable) {
2164 if (result->pri_val > max->pri)
2165 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2166 level, result->pri_val, max->pri);
2167 if (result->spr_val > max->spr)
2168 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2169 level, result->spr_val, max->spr);
2170 if (result->cur_val > max->cur)
2171 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2172 level, result->cur_val, max->cur);
2173
2174 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2175 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2176 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2177 result->enable = true;
2178 }
2179
2180 return ret;
2181 }
2182
2183 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2184 int level,
2185 const struct ilk_pipe_wm_parameters *p,
2186 struct intel_wm_level *result)
2187 {
2188 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2189 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2190 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2191
2192 /* WM1+ latency values stored in 0.5us units */
2193 if (level > 0) {
2194 pri_latency *= 5;
2195 spr_latency *= 5;
2196 cur_latency *= 5;
2197 }
2198
2199 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
2200 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
2201 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
2202 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
2203 result->enable = true;
2204 }
2205
2206 static uint32_t
2207 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2208 {
2209 struct drm_i915_private *dev_priv = dev->dev_private;
2210 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2211 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2212 u32 linetime, ips_linetime;
2213
2214 if (!intel_crtc_active(crtc))
2215 return 0;
2216
2217 	/* The WMs are computed based on how long it takes to fill a
2218 	 * single row at the given clock rate, multiplied by 8.
2219 	 */
2220 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2221 mode->crtc_clock);
2222 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2223 intel_ddi_get_cdclk_freq(dev_priv));
2224
2225 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2226 PIPE_WM_LINETIME_TIME(linetime);
2227 }
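
/*
 * Worked example (the mode and cdclk values are assumptions for
 * illustration): with htotal = 2200 and crtc_clock = 148500 kHz a line
 * takes ~14.8us, so linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8,
 * 148500) = 119 in 0.125us units. Assuming a 450 MHz cdclk (450000
 * kHz), ips_linetime = DIV_ROUND_CLOSEST(17600000, 450000) = 39.
 */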
2228
2229 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2230 {
2231 struct drm_i915_private *dev_priv = dev->dev_private;
2232
2233 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2234 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2235
2236 wm[0] = (sskpd >> 56) & 0xFF;
2237 if (wm[0] == 0)
2238 wm[0] = sskpd & 0xF;
2239 wm[1] = (sskpd >> 4) & 0xFF;
2240 wm[2] = (sskpd >> 12) & 0xFF;
2241 wm[3] = (sskpd >> 20) & 0x1FF;
2242 wm[4] = (sskpd >> 32) & 0x1FF;
2243 } else if (INTEL_INFO(dev)->gen >= 6) {
2244 uint32_t sskpd = I915_READ(MCH_SSKPD);
2245
2246 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2247 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2248 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2249 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2250 } else if (INTEL_INFO(dev)->gen >= 5) {
2251 uint32_t mltr = I915_READ(MLTR_ILK);
2252
2253 /* ILK primary LP0 latency is 700 ns */
2254 wm[0] = 7;
2255 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2256 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2257 }
2258 }
2259
2260 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2261 {
2262 /* ILK sprite LP0 latency is 1300 ns */
2263 if (INTEL_INFO(dev)->gen == 5)
2264 wm[0] = 13;
2265 }
2266
2267 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2268 {
2269 /* ILK cursor LP0 latency is 1300 ns */
2270 if (INTEL_INFO(dev)->gen == 5)
2271 wm[0] = 13;
2272
2273 /* WaDoubleCursorLP3Latency:ivb */
2274 if (IS_IVYBRIDGE(dev))
2275 wm[3] *= 2;
2276 }
2277
2278 int ilk_wm_max_level(const struct drm_device *dev)
2279 {
2280 /* how many WM levels are we expecting */
2281 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2282 return 4;
2283 else if (INTEL_INFO(dev)->gen >= 6)
2284 return 3;
2285 else
2286 return 2;
2287 }
2288
2289 static void intel_print_wm_latency(struct drm_device *dev,
2290 const char *name,
2291 const uint16_t wm[5])
2292 {
2293 int level, max_level = ilk_wm_max_level(dev);
2294
2295 for (level = 0; level <= max_level; level++) {
2296 unsigned int latency = wm[level];
2297
2298 if (latency == 0) {
2299 DRM_ERROR("%s WM%d latency not provided\n",
2300 name, level);
2301 continue;
2302 }
2303
2304 /* WM1+ latency values in 0.5us units */
2305 if (level > 0)
2306 latency *= 5;
2307
2308 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2309 name, level, wm[level],
2310 latency / 10, latency % 10);
2311 }
2312 }
2313
2314 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2315 uint16_t wm[5], uint16_t min)
2316 {
2317 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2318
2319 if (wm[0] >= min)
2320 return false;
2321
2322 wm[0] = max(wm[0], min);
2323 for (level = 1; level <= max_level; level++)
2324 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2325
2326 return true;
2327 }
2328
2329 static void snb_wm_latency_quirk(struct drm_device *dev)
2330 {
2331 struct drm_i915_private *dev_priv = dev->dev_private;
2332 bool changed;
2333
2334 /*
2335 	 * The BIOS-provided WM memory latency values are often
2336 	 * inadequate for high-resolution displays. Adjust them.
2337 */
2338 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2339 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2340 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2341
2342 if (!changed)
2343 return;
2344
2345 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2346 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2347 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2348 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2349 }
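
/*
 * Worked example (illustrative BIOS value): if the BIOS reported a WM0
 * latency of 7 (0.7us, below the 1.2us minimum enforced above),
 * ilk_increase_wm_latency() bumps wm[0] to 12 (0.1us units) and raises
 * every WM1+ entry to at least DIV_ROUND_UP(12, 5) = 3, i.e. 1.5us,
 * since WM1+ values are stored in 0.5us units.
 */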
2350
2351 static void ilk_setup_wm_latency(struct drm_device *dev)
2352 {
2353 struct drm_i915_private *dev_priv = dev->dev_private;
2354
2355 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2356
2357 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2358 sizeof(dev_priv->wm.pri_latency));
2359 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2360 sizeof(dev_priv->wm.pri_latency));
2361
2362 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2363 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2364
2365 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2366 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2367 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2368
2369 if (IS_GEN6(dev))
2370 snb_wm_latency_quirk(dev);
2371 }
2372
2373 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2374 struct ilk_pipe_wm_parameters *p)
2375 {
2376 struct drm_device *dev = crtc->dev;
2377 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2378 enum pipe pipe = intel_crtc->pipe;
2379 struct drm_plane *plane;
2380
2381 if (!intel_crtc_active(crtc))
2382 return;
2383
2384 p->active = true;
2385 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2386 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2387 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2388 p->cur.bytes_per_pixel = 4;
2389 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2390 p->cur.horiz_pixels = intel_crtc->cursor_width;
2391 /* TODO: for now, assume primary and cursor planes are always enabled. */
2392 p->pri.enabled = true;
2393 p->cur.enabled = true;
2394
2395 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2396 struct intel_plane *intel_plane = to_intel_plane(plane);
2397
2398 if (intel_plane->pipe == pipe) {
2399 p->spr = intel_plane->wm;
2400 break;
2401 }
2402 }
2403 }
2404
2405 static void ilk_compute_wm_config(struct drm_device *dev,
2406 struct intel_wm_config *config)
2407 {
2408 struct intel_crtc *intel_crtc;
2409
2410 /* Compute the currently _active_ config */
2411 for_each_intel_crtc(dev, intel_crtc) {
2412 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2413
2414 if (!wm->pipe_enabled)
2415 continue;
2416
2417 config->sprites_enabled |= wm->sprites_enabled;
2418 config->sprites_scaled |= wm->sprites_scaled;
2419 config->num_pipes_active++;
2420 }
2421 }
2422
2423 /* Compute new watermarks for the pipe */
2424 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2425 const struct ilk_pipe_wm_parameters *params,
2426 struct intel_pipe_wm *pipe_wm)
2427 {
2428 struct drm_device *dev = crtc->dev;
2429 const struct drm_i915_private *dev_priv = dev->dev_private;
2430 int level, max_level = ilk_wm_max_level(dev);
2431 /* LP0 watermark maximums depend on this pipe alone */
2432 struct intel_wm_config config = {
2433 .num_pipes_active = 1,
2434 .sprites_enabled = params->spr.enabled,
2435 .sprites_scaled = params->spr.scaled,
2436 };
2437 struct ilk_wm_maximums max;
2438
2439 pipe_wm->pipe_enabled = params->active;
2440 pipe_wm->sprites_enabled = params->spr.enabled;
2441 pipe_wm->sprites_scaled = params->spr.scaled;
2442
2443 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2444 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2445 max_level = 1;
2446
2447 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2448 if (params->spr.scaled)
2449 max_level = 0;
2450
2451 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
2452
2453 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2454 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2455
2456 /* LP0 watermarks always use 1/2 DDB partitioning */
2457 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2458
2459 /* At least LP0 must be valid */
2460 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2461 return false;
2462
2463 ilk_compute_wm_reg_maximums(dev, 1, &max);
2464
2465 for (level = 1; level <= max_level; level++) {
2466 struct intel_wm_level wm = {};
2467
2468 ilk_compute_wm_level(dev_priv, level, params, &wm);
2469
2470 /*
2471 * Disable any watermark level that exceeds the
2472 * register maximums since such watermarks are
2473 * always invalid.
2474 */
2475 if (!ilk_validate_wm_level(level, &max, &wm))
2476 break;
2477
2478 pipe_wm->wm[level] = wm;
2479 }
2480
2481 return true;
2482 }
2483
2484 /*
2485 * Merge the watermarks from all active pipes for a specific level.
2486 */
2487 static void ilk_merge_wm_level(struct drm_device *dev,
2488 int level,
2489 struct intel_wm_level *ret_wm)
2490 {
2491 const struct intel_crtc *intel_crtc;
2492
2493 ret_wm->enable = true;
2494
2495 for_each_intel_crtc(dev, intel_crtc) {
2496 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2497 const struct intel_wm_level *wm = &active->wm[level];
2498
2499 if (!active->pipe_enabled)
2500 continue;
2501
2502 /*
2503 * The watermark values may have been used in the past,
2504 * so we must maintain them in the registers for some
2505 * time even if the level is now disabled.
2506 */
2507 if (!wm->enable)
2508 ret_wm->enable = false;
2509
2510 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2511 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2512 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2513 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2514 }
2515 }
2516
2517 /*
2518 * Merge all low power watermarks for all active pipes.
2519 */
2520 static void ilk_wm_merge(struct drm_device *dev,
2521 const struct intel_wm_config *config,
2522 const struct ilk_wm_maximums *max,
2523 struct intel_pipe_wm *merged)
2524 {
2525 int level, max_level = ilk_wm_max_level(dev);
2526 int last_enabled_level = max_level;
2527
2528 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2529 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2530 config->num_pipes_active > 1)
2531 return;
2532
2533 /* ILK: FBC WM must be disabled always */
2534 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2535
2536 /* merge each WM1+ level */
2537 for (level = 1; level <= max_level; level++) {
2538 struct intel_wm_level *wm = &merged->wm[level];
2539
2540 ilk_merge_wm_level(dev, level, wm);
2541
2542 if (level > last_enabled_level)
2543 wm->enable = false;
2544 else if (!ilk_validate_wm_level(level, max, wm))
2545 /* make sure all following levels get disabled */
2546 last_enabled_level = level - 1;
2547
2548 /*
2549 * The spec says it is preferred to disable
2550 * FBC WMs instead of disabling a WM level.
2551 */
2552 if (wm->fbc_val > max->fbc) {
2553 if (wm->enable)
2554 merged->fbc_wm_enabled = false;
2555 wm->fbc_val = 0;
2556 }
2557 }
2558
2559 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2560 /*
2561 * FIXME this is racy. FBC might get enabled later.
2562 * What we should check here is whether FBC can be
2563 * enabled sometime later.
2564 */
2565 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2566 for (level = 2; level <= max_level; level++) {
2567 struct intel_wm_level *wm = &merged->wm[level];
2568
2569 wm->enable = false;
2570 }
2571 }
2572 }
2573
2574 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2575 {
2576 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2577 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2578 }
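
/*
 * Illustrative mapping implied by the expression above: when wm[4] is
 * enabled (HSW/BDW with all five levels valid) LP1/LP2/LP3 map to
 * levels 1/3/4, e.g. ilk_wm_lp_to_level(2, pipe_wm) == 3; otherwise
 * they map to levels 1/2/3.
 */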
2579
2580 /* The value we need to program into the WM_LPx latency field */
2581 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2582 {
2583 struct drm_i915_private *dev_priv = dev->dev_private;
2584
2585 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2586 return 2 * level;
2587 else
2588 return dev_priv->wm.pri_latency[level];
2589 }
2590
2591 static void ilk_compute_wm_results(struct drm_device *dev,
2592 const struct intel_pipe_wm *merged,
2593 enum intel_ddb_partitioning partitioning,
2594 struct ilk_wm_values *results)
2595 {
2596 struct intel_crtc *intel_crtc;
2597 int level, wm_lp;
2598
2599 results->enable_fbc_wm = merged->fbc_wm_enabled;
2600 results->partitioning = partitioning;
2601
2602 /* LP1+ register values */
2603 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2604 const struct intel_wm_level *r;
2605
2606 level = ilk_wm_lp_to_level(wm_lp, merged);
2607
2608 r = &merged->wm[level];
2609
2610 /*
2611 * Maintain the watermark values even if the level is
2612 * disabled. Doing otherwise could cause underruns.
2613 */
2614 results->wm_lp[wm_lp - 1] =
2615 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2616 (r->pri_val << WM1_LP_SR_SHIFT) |
2617 r->cur_val;
2618
2619 if (r->enable)
2620 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2621
2622 if (INTEL_INFO(dev)->gen >= 8)
2623 results->wm_lp[wm_lp - 1] |=
2624 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2625 else
2626 results->wm_lp[wm_lp - 1] |=
2627 r->fbc_val << WM1_LP_FBC_SHIFT;
2628
2629 /*
2630 * Always set WM1S_LP_EN when spr_val != 0, even if the
2631 * level is disabled. Doing otherwise could cause underruns.
2632 */
2633 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2634 WARN_ON(wm_lp != 1);
2635 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2636 } else
2637 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2638 }
2639
2640 /* LP0 register values */
2641 for_each_intel_crtc(dev, intel_crtc) {
2642 enum pipe pipe = intel_crtc->pipe;
2643 const struct intel_wm_level *r =
2644 &intel_crtc->wm.active.wm[0];
2645
2646 if (WARN_ON(!r->enable))
2647 continue;
2648
2649 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2650
2651 results->wm_pipe[pipe] =
2652 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2653 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2654 r->cur_val;
2655 }
2656 }
2657
2658 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2659 * case both are at the same level. Prefer r1 in case they're the same. */
2660 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2661 struct intel_pipe_wm *r1,
2662 struct intel_pipe_wm *r2)
2663 {
2664 int level, max_level = ilk_wm_max_level(dev);
2665 int level1 = 0, level2 = 0;
2666
2667 for (level = 1; level <= max_level; level++) {
2668 if (r1->wm[level].enable)
2669 level1 = level;
2670 if (r2->wm[level].enable)
2671 level2 = level;
2672 }
2673
2674 if (level1 == level2) {
2675 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2676 return r2;
2677 else
2678 return r1;
2679 } else if (level1 > level2) {
2680 return r1;
2681 } else {
2682 return r2;
2683 }
2684 }
2685
2686 /* dirty bits used to track which watermarks need changes */
2687 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2688 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2689 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2690 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2691 #define WM_DIRTY_FBC (1 << 24)
2692 #define WM_DIRTY_DDB (1 << 25)
2693
2694 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2695 const struct ilk_wm_values *old,
2696 const struct ilk_wm_values *new)
2697 {
2698 unsigned int dirty = 0;
2699 enum pipe pipe;
2700 int wm_lp;
2701
2702 for_each_pipe(dev_priv, pipe) {
2703 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2704 dirty |= WM_DIRTY_LINETIME(pipe);
2705 /* Must disable LP1+ watermarks too */
2706 dirty |= WM_DIRTY_LP_ALL;
2707 }
2708
2709 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2710 dirty |= WM_DIRTY_PIPE(pipe);
2711 /* Must disable LP1+ watermarks too */
2712 dirty |= WM_DIRTY_LP_ALL;
2713 }
2714 }
2715
2716 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2717 dirty |= WM_DIRTY_FBC;
2718 /* Must disable LP1+ watermarks too */
2719 dirty |= WM_DIRTY_LP_ALL;
2720 }
2721
2722 if (old->partitioning != new->partitioning) {
2723 dirty |= WM_DIRTY_DDB;
2724 /* Must disable LP1+ watermarks too */
2725 dirty |= WM_DIRTY_LP_ALL;
2726 }
2727
2728 /* LP1+ watermarks already deemed dirty, no need to continue */
2729 if (dirty & WM_DIRTY_LP_ALL)
2730 return dirty;
2731
2732 /* Find the lowest numbered LP1+ watermark in need of an update... */
2733 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2734 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2735 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2736 break;
2737 }
2738
2739 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2740 for (; wm_lp <= 3; wm_lp++)
2741 dirty |= WM_DIRTY_LP(wm_lp);
2742
2743 return dirty;
2744 }
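
/*
 * Worked example (illustrative): a change to wm_pipe[PIPE_A] alone
 * yields dirty = WM_DIRTY_PIPE(PIPE_A) | WM_DIRTY_LP_ALL =
 * (1 << 0) | (1 << 16) | (1 << 17) | (1 << 18) = 0x00070001, i.e.
 * LP1-LP3 are always rewritten whenever any pipe watermark changes,
 * which matches the early return above once WM_DIRTY_LP_ALL is set.
 */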
2745
2746 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2747 unsigned int dirty)
2748 {
2749 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2750 bool changed = false;
2751
2752 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2753 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2754 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2755 changed = true;
2756 }
2757 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2758 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2759 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2760 changed = true;
2761 }
2762 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2763 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2764 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2765 changed = true;
2766 }
2767
2768 /*
2769 * Don't touch WM1S_LP_EN here.
2770 * Doing so could cause underruns.
2771 */
2772
2773 return changed;
2774 }
2775
2776 /*
2777  * The spec says we shouldn't write when we don't need to, because every write
2778 * causes WMs to be re-evaluated, expending some power.
2779 */
2780 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2781 struct ilk_wm_values *results)
2782 {
2783 struct drm_device *dev = dev_priv->dev;
2784 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2785 unsigned int dirty;
2786 uint32_t val;
2787
2788 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2789 if (!dirty)
2790 return;
2791
2792 _ilk_disable_lp_wm(dev_priv, dirty);
2793
2794 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2795 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2796 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2797 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2798 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2799 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2800
2801 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2802 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2803 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2804 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2805 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2806 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2807
2808 if (dirty & WM_DIRTY_DDB) {
2809 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2810 val = I915_READ(WM_MISC);
2811 if (results->partitioning == INTEL_DDB_PART_1_2)
2812 val &= ~WM_MISC_DATA_PARTITION_5_6;
2813 else
2814 val |= WM_MISC_DATA_PARTITION_5_6;
2815 I915_WRITE(WM_MISC, val);
2816 } else {
2817 val = I915_READ(DISP_ARB_CTL2);
2818 if (results->partitioning == INTEL_DDB_PART_1_2)
2819 val &= ~DISP_DATA_PARTITION_5_6;
2820 else
2821 val |= DISP_DATA_PARTITION_5_6;
2822 I915_WRITE(DISP_ARB_CTL2, val);
2823 }
2824 }
2825
2826 if (dirty & WM_DIRTY_FBC) {
2827 val = I915_READ(DISP_ARB_CTL);
2828 if (results->enable_fbc_wm)
2829 val &= ~DISP_FBC_WM_DIS;
2830 else
2831 val |= DISP_FBC_WM_DIS;
2832 I915_WRITE(DISP_ARB_CTL, val);
2833 }
2834
2835 if (dirty & WM_DIRTY_LP(1) &&
2836 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2837 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2838
2839 if (INTEL_INFO(dev)->gen >= 7) {
2840 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2841 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2842 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2843 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2844 }
2845
2846 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2847 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2848 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2849 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2850 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2851 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2852
2853 dev_priv->wm.hw = *results;
2854 }
2855
2856 static bool ilk_disable_lp_wm(struct drm_device *dev)
2857 {
2858 struct drm_i915_private *dev_priv = dev->dev_private;
2859
2860 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2861 }
2862
2863 static void ilk_update_wm(struct drm_crtc *crtc)
2864 {
2865 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2866 struct drm_device *dev = crtc->dev;
2867 struct drm_i915_private *dev_priv = dev->dev_private;
2868 struct ilk_wm_maximums max;
2869 struct ilk_pipe_wm_parameters params = {};
2870 struct ilk_wm_values results = {};
2871 enum intel_ddb_partitioning partitioning;
2872 struct intel_pipe_wm pipe_wm = {};
2873 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2874 struct intel_wm_config config = {};
2875
2876 ilk_compute_wm_parameters(crtc, &params);
2877
2878 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2879
2880 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2881 return;
2882
2883 intel_crtc->wm.active = pipe_wm;
2884
2885 ilk_compute_wm_config(dev, &config);
2886
2887 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2888 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2889
2890 /* 5/6 split only in single pipe config on IVB+ */
2891 if (INTEL_INFO(dev)->gen >= 7 &&
2892 config.num_pipes_active == 1 && config.sprites_enabled) {
2893 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2894 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2895
2896 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2897 } else {
2898 best_lp_wm = &lp_wm_1_2;
2899 }
2900
2901 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2902 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2903
2904 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2905
2906 ilk_write_wm_values(dev_priv, &results);
2907 }
2908
2909 static void
2910 ilk_update_sprite_wm(struct drm_plane *plane,
2911 struct drm_crtc *crtc,
2912 uint32_t sprite_width, uint32_t sprite_height,
2913 int pixel_size, bool enabled, bool scaled)
2914 {
2915 struct drm_device *dev = plane->dev;
2916 struct intel_plane *intel_plane = to_intel_plane(plane);
2917
2918 intel_plane->wm.enabled = enabled;
2919 intel_plane->wm.scaled = scaled;
2920 intel_plane->wm.horiz_pixels = sprite_width;
2921 	intel_plane->wm.vert_pixels = sprite_height;
2922 intel_plane->wm.bytes_per_pixel = pixel_size;
2923
2924 /*
2925 * IVB workaround: must disable low power watermarks for at least
2926 * one frame before enabling scaling. LP watermarks can be re-enabled
2927 * when scaling is disabled.
2928 *
2929 * WaCxSRDisabledForSpriteScaling:ivb
2930 */
2931 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
2932 intel_wait_for_vblank(dev, intel_plane->pipe);
2933
2934 ilk_update_wm(crtc);
2935 }
2936
2937 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
2938 {
2939 struct drm_device *dev = crtc->dev;
2940 struct drm_i915_private *dev_priv = dev->dev_private;
2941 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2942 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2943 struct intel_pipe_wm *active = &intel_crtc->wm.active;
2944 enum pipe pipe = intel_crtc->pipe;
2945 static const unsigned int wm0_pipe_reg[] = {
2946 [PIPE_A] = WM0_PIPEA_ILK,
2947 [PIPE_B] = WM0_PIPEB_ILK,
2948 [PIPE_C] = WM0_PIPEC_IVB,
2949 };
2950
2951 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
2952 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2953 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
2954
2955 active->pipe_enabled = intel_crtc_active(crtc);
2956
2957 if (active->pipe_enabled) {
2958 u32 tmp = hw->wm_pipe[pipe];
2959
2960 /*
2961 * For active pipes LP0 watermark is marked as
2962 		 * enabled, and LP1+ watermarks as disabled since
2963 * we can't really reverse compute them in case
2964 * multiple pipes are active.
2965 */
2966 active->wm[0].enable = true;
2967 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
2968 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
2969 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
2970 active->linetime = hw->wm_linetime[pipe];
2971 } else {
2972 int level, max_level = ilk_wm_max_level(dev);
2973
2974 /*
2975 * For inactive pipes, all watermark levels
2976 * should be marked as enabled but zeroed,
2977 * which is what we'd compute them to.
2978 */
2979 for (level = 0; level <= max_level; level++)
2980 active->wm[level].enable = true;
2981 }
2982 }
2983
2984 void ilk_wm_get_hw_state(struct drm_device *dev)
2985 {
2986 struct drm_i915_private *dev_priv = dev->dev_private;
2987 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2988 struct drm_crtc *crtc;
2989
2990 for_each_crtc(dev, crtc)
2991 ilk_pipe_wm_get_hw_state(crtc);
2992
2993 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
2994 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
2995 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2996
2997 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2998 if (INTEL_INFO(dev)->gen >= 7) {
2999 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
3000 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
3001 }
3002
3003 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3004 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
3005 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3006 else if (IS_IVYBRIDGE(dev))
3007 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
3008 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3009
3010 hw->enable_fbc_wm =
3011 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
3012 }
3013
3014 /**
3015 * intel_update_watermarks - update FIFO watermark values based on current modes
3016 *
3017 * Calculate watermark values for the various WM regs based on current mode
3018 * and plane configuration.
3019 *
3020 * There are several cases to deal with here:
3021 * - normal (i.e. non-self-refresh)
3022 * - self-refresh (SR) mode
3023 * - lines are large relative to FIFO size (buffer can hold up to 2)
3024 * - lines are small relative to FIFO size (buffer can hold more than 2
3025 * lines), so need to account for TLB latency
3026 *
3027 * The normal calculation is:
3028 * watermark = dotclock * bytes per pixel * latency
3029 * where latency is platform & configuration dependent (we assume pessimal
3030 * values here).
3031 *
3032 * The SR calculation is:
3033 * watermark = (trunc(latency/line time)+1) * surface width *
3034 * bytes per pixel
3035 * where
3036 * line time = htotal / dotclock
3037 * surface width = hdisplay for normal plane and 64 for cursor
3038 * and latency is assumed to be high, as above.
3039 *
3040 * The final value programmed to the register should always be rounded up,
3041 * and include an extra 2 entries to account for clock crossings.
3042 *
3043 * We don't use the sprite, so we can ignore that. And on Crestline we have
3044 * to set the non-SR watermarks to 8.
3045 */
3046 void intel_update_watermarks(struct drm_crtc *crtc)
3047 {
3048 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
3049
3050 if (dev_priv->display.update_wm)
3051 dev_priv->display.update_wm(crtc);
3052 }
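
/*
 * Worked example for the SR formula in the comment above (illustrative
 * values): at 1920x1080@60 (dotclock 148500 kHz, htotal 2200) a line
 * takes ~14.8us. With 12us of SR latency and 4 bytes per pixel:
 *   (trunc(12 / 14.8) + 1) * 1920 * 4 = 7680 bytes
 * which is 120 64-byte FIFO entries before the extra 2-entry margin
 * for clock crossings is added.
 */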
3053
3054 void intel_update_sprite_watermarks(struct drm_plane *plane,
3055 struct drm_crtc *crtc,
3056 uint32_t sprite_width,
3057 uint32_t sprite_height,
3058 int pixel_size,
3059 bool enabled, bool scaled)
3060 {
3061 struct drm_i915_private *dev_priv = plane->dev->dev_private;
3062
3063 if (dev_priv->display.update_sprite_wm)
3064 dev_priv->display.update_sprite_wm(plane, crtc,
3065 sprite_width, sprite_height,
3066 pixel_size, enabled, scaled);
3067 }
3068
3069 static struct drm_i915_gem_object *
3070 intel_alloc_context_page(struct drm_device *dev)
3071 {
3072 struct drm_i915_gem_object *ctx;
3073 int ret;
3074
3075 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3076
3077 ctx = i915_gem_alloc_object(dev, 4096);
3078 if (!ctx) {
3079 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
3080 return NULL;
3081 }
3082
3083 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
3084 if (ret) {
3085 DRM_ERROR("failed to pin power context: %d\n", ret);
3086 goto err_unref;
3087 }
3088
3089 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
3090 if (ret) {
3091 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
3092 goto err_unpin;
3093 }
3094
3095 return ctx;
3096
3097 err_unpin:
3098 i915_gem_object_ggtt_unpin(ctx);
3099 err_unref:
3100 drm_gem_object_unreference(&ctx->base);
3101 return NULL;
3102 }
3103
3104 /**
3105  * Lock protecting IPS-related data structures
3106 */
3107 DEFINE_SPINLOCK(mchdev_lock);
3108
3109 /* Global for IPS driver to get at the current i915 device. Protected by
3110 * mchdev_lock. */
3111 static struct drm_i915_private *i915_mch_dev;
3112
3113 bool ironlake_set_drps(struct drm_device *dev, u8 val)
3114 {
3115 struct drm_i915_private *dev_priv = dev->dev_private;
3116 u16 rgvswctl;
3117
3118 assert_spin_locked(&mchdev_lock);
3119
3120 rgvswctl = I915_READ16(MEMSWCTL);
3121 if (rgvswctl & MEMCTL_CMD_STS) {
3122 DRM_DEBUG("gpu busy, RCS change rejected\n");
3123 return false; /* still busy with another command */
3124 }
3125
3126 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
3127 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
3128 I915_WRITE16(MEMSWCTL, rgvswctl);
3129 POSTING_READ16(MEMSWCTL);
3130
3131 rgvswctl |= MEMCTL_CMD_STS;
3132 I915_WRITE16(MEMSWCTL, rgvswctl);
3133
3134 return true;
3135 }
3136
3137 static void ironlake_enable_drps(struct drm_device *dev)
3138 {
3139 struct drm_i915_private *dev_priv = dev->dev_private;
3140 u32 rgvmodectl = I915_READ(MEMMODECTL);
3141 u8 fmax, fmin, fstart, vstart;
3142
3143 spin_lock_irq(&mchdev_lock);
3144
3145 /* Enable temp reporting */
3146 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
3147 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
3148
3149 /* 100ms RC evaluation intervals */
3150 I915_WRITE(RCUPEI, 100000);
3151 I915_WRITE(RCDNEI, 100000);
3152
3153 /* Set max/min thresholds to 90ms and 80ms respectively */
3154 I915_WRITE(RCBMAXAVG, 90000);
3155 I915_WRITE(RCBMINAVG, 80000);
3156
3157 I915_WRITE(MEMIHYST, 1);
3158
3159 /* Set up min, max, and cur for interrupt handling */
3160 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
3161 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
3162 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
3163 MEMMODE_FSTART_SHIFT;
3164
3165 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
3166 PXVFREQ_PX_SHIFT;
3167
3168 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
3169 dev_priv->ips.fstart = fstart;
3170
3171 dev_priv->ips.max_delay = fstart;
3172 dev_priv->ips.min_delay = fmin;
3173 dev_priv->ips.cur_delay = fstart;
3174
3175 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
3176 fmax, fmin, fstart);
3177
3178 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
3179
3180 /*
3181 * Interrupts will be enabled in ironlake_irq_postinstall
3182 */
3183
3184 I915_WRITE(VIDSTART, vstart);
3185 POSTING_READ(VIDSTART);
3186
3187 rgvmodectl |= MEMMODE_SWMODE_EN;
3188 I915_WRITE(MEMMODECTL, rgvmodectl);
3189
3190 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
3191 DRM_ERROR("stuck trying to change perf mode\n");
3192 mdelay(1);
3193
3194 ironlake_set_drps(dev, fstart);
3195
3196 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
3197 I915_READ(0x112e0);
3198 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
3199 dev_priv->ips.last_count2 = I915_READ(0x112f4);
3200 dev_priv->ips.last_time2 = ktime_get_raw_ns();
3201
3202 spin_unlock_irq(&mchdev_lock);
3203 }
3204
3205 static void ironlake_disable_drps(struct drm_device *dev)
3206 {
3207 struct drm_i915_private *dev_priv = dev->dev_private;
3208 u16 rgvswctl;
3209
3210 spin_lock_irq(&mchdev_lock);
3211
3212 rgvswctl = I915_READ16(MEMSWCTL);
3213
3214 /* Ack interrupts, disable EFC interrupt */
3215 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3216 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3217 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3218 I915_WRITE(DEIIR, DE_PCU_EVENT);
3219 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3220
3221 /* Go back to the starting frequency */
3222 ironlake_set_drps(dev, dev_priv->ips.fstart);
3223 mdelay(1);
3224 rgvswctl |= MEMCTL_CMD_STS;
3225 I915_WRITE(MEMSWCTL, rgvswctl);
3226 mdelay(1);
3227
3228 spin_unlock_irq(&mchdev_lock);
3229 }
3230
3231 /* There's a funny hw issue where the hw returns all 0 when reading from
3232 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3233  * ourselves, instead of doing an rmw cycle (which might result in us clearing
3234  * all limits and leaving the gpu stuck at whatever frequency it currently is at).
3235 */
3236 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
3237 {
3238 u32 limits;
3239
3240 /* Only set the down limit when we've reached the lowest level to avoid
3241 	 * getting more interrupts; otherwise leave this clear. This prevents a
3242 	 * race in the hw when coming out of rc6: There's a tiny window where
3243 	 * the hw runs at the minimal clock before selecting the desired
3244 	 * frequency; if the down threshold expires in that window we will not
3245 	 * receive a down interrupt. */
3246 limits = dev_priv->rps.max_freq_softlimit << 24;
3247 if (val <= dev_priv->rps.min_freq_softlimit)
3248 limits |= dev_priv->rps.min_freq_softlimit << 16;
3249
3250 return limits;
3251 }
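
/*
 * Worked example (illustrative softlimit values): with
 * max_freq_softlimit = 0x20 and min_freq_softlimit = 0x0b, requesting
 * val = 0x0b (the floor) packs
 *   limits = (0x20 << 24) | (0x0b << 16) = 0x200b0000,
 * while any higher val leaves the down limit clear: 0x20000000.
 */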
3252
3253 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3254 {
3255 int new_power;
3256
3257 new_power = dev_priv->rps.power;
3258 switch (dev_priv->rps.power) {
3259 case LOW_POWER:
3260 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
3261 new_power = BETWEEN;
3262 break;
3263
3264 case BETWEEN:
3265 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
3266 new_power = LOW_POWER;
3267 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
3268 new_power = HIGH_POWER;
3269 break;
3270
3271 case HIGH_POWER:
3272 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
3273 new_power = BETWEEN;
3274 break;
3275 }
3276 /* Max/min bins are special */
3277 if (val == dev_priv->rps.min_freq_softlimit)
3278 new_power = LOW_POWER;
3279 if (val == dev_priv->rps.max_freq_softlimit)
3280 new_power = HIGH_POWER;
3281 if (new_power == dev_priv->rps.power)
3282 return;
3283
3284 /* Note the units here are not exactly 1us, but 1280ns. */
3285 switch (new_power) {
3286 case LOW_POWER:
3287 /* Upclock if more than 95% busy over 16ms */
3288 I915_WRITE(GEN6_RP_UP_EI, 12500);
3289 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3290
3291 /* Downclock if less than 85% busy over 32ms */
3292 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3293 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3294
3295 I915_WRITE(GEN6_RP_CONTROL,
3296 GEN6_RP_MEDIA_TURBO |
3297 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3298 GEN6_RP_MEDIA_IS_GFX |
3299 GEN6_RP_ENABLE |
3300 GEN6_RP_UP_BUSY_AVG |
3301 GEN6_RP_DOWN_IDLE_AVG);
3302 break;
3303
3304 case BETWEEN:
3305 /* Upclock if more than 90% busy over 13ms */
3306 I915_WRITE(GEN6_RP_UP_EI, 10250);
3307 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3308
3309 /* Downclock if less than 75% busy over 32ms */
3310 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3311 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3312
3313 I915_WRITE(GEN6_RP_CONTROL,
3314 GEN6_RP_MEDIA_TURBO |
3315 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3316 GEN6_RP_MEDIA_IS_GFX |
3317 GEN6_RP_ENABLE |
3318 GEN6_RP_UP_BUSY_AVG |
3319 GEN6_RP_DOWN_IDLE_AVG);
3320 break;
3321
3322 case HIGH_POWER:
3323 /* Upclock if more than 85% busy over 10ms */
3324 I915_WRITE(GEN6_RP_UP_EI, 8000);
3325 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3326
3327 /* Downclock if less than 60% busy over 32ms */
3328 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3329 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3330
3331 I915_WRITE(GEN6_RP_CONTROL,
3332 GEN6_RP_MEDIA_TURBO |
3333 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3334 GEN6_RP_MEDIA_IS_GFX |
3335 GEN6_RP_ENABLE |
3336 GEN6_RP_UP_BUSY_AVG |
3337 GEN6_RP_DOWN_IDLE_AVG);
3338 break;
3339 }
3340
3341 dev_priv->rps.power = new_power;
3342 dev_priv->rps.last_adj = 0;
3343 }
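
/*
 * Worked example for the LOW_POWER thresholds above (1280ns units):
 * GEN6_RP_UP_EI = 12500 gives 12500 * 1.28us = 16ms of evaluation
 * interval, and GEN6_RP_UP_THRESHOLD = 11800 means 11800 / 12500 =
 * 94.4% busy, i.e. "more than ~95% busy over 16ms". Likewise
 * GEN6_RP_DOWN_EI = 25000 is 32ms and 21250 / 25000 = 85%.
 */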
3344
3345 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3346 {
3347 u32 mask = 0;
3348
3349 if (val > dev_priv->rps.min_freq_softlimit)
3350 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3351 if (val < dev_priv->rps.max_freq_softlimit)
3352 mask |= GEN6_PM_RP_UP_THRESHOLD;
3353
3354 mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
3355 mask &= dev_priv->pm_rps_events;
3356
3357 /* IVB and SNB hard hangs on looping batchbuffer
3358 * if GEN6_PM_UP_EI_EXPIRED is masked.
3359 */
3360 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3361 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3362
3363 if (IS_GEN8(dev_priv->dev))
3364 mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
3365
3366 return ~mask;
3367 }
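
/*
 * Reading the inverted return value: bits set in ~mask end up masked
 * in GEN6_PMINTRMSK. At the minimum softlimit the DOWN_THRESHOLD and
 * DOWN_TIMEOUT bits are never added to mask, so they remain masked off
 * and no further downclock interrupts are raised; the symmetric rule
 * applies to UP_THRESHOLD at the maximum.
 */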
3368
3369 /* gen6_set_rps is called to update the frequency request, but should also be
3370 * called when the range (min_delay and max_delay) is modified so that we can
3371 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3372 void gen6_set_rps(struct drm_device *dev, u8 val)
3373 {
3374 struct drm_i915_private *dev_priv = dev->dev_private;
3375
3376 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3377 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3378 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3379
3380 	/* min/max delay may still have been modified, so be sure to
3381 * write the limits value.
3382 */
3383 if (val != dev_priv->rps.cur_freq) {
3384 gen6_set_rps_thresholds(dev_priv, val);
3385
3386 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3387 I915_WRITE(GEN6_RPNSWREQ,
3388 HSW_FREQUENCY(val));
3389 else
3390 I915_WRITE(GEN6_RPNSWREQ,
3391 GEN6_FREQUENCY(val) |
3392 GEN6_OFFSET(0) |
3393 GEN6_AGGRESSIVE_TURBO);
3394 }
3395
3396 /* Make sure we continue to get interrupts
3397 * until we hit the minimum or maximum frequencies.
3398 */
3399 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3400 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3401
3402 POSTING_READ(GEN6_RPNSWREQ);
3403
3404 dev_priv->rps.cur_freq = val;
3405 trace_intel_gpu_freq_change(val * 50);
3406 }
3407
3408 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
3409 *
3410  * If Gfx is idle, then:
3411  * 1. Mask Turbo interrupts
3412  * 2. Bring up Gfx clock
3413  * 3. Change the freq to Rpn and wait till P-Unit updates freq
3414  * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
3415 * 5. Unmask Turbo interrupts
3416 */
3417 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3418 {
3419 struct drm_device *dev = dev_priv->dev;
3420
3421 /* Latest VLV doesn't need to force the gfx clock */
3422 if (dev->pdev->revision >= 0xd) {
3423 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3424 return;
3425 }
3426
3427 /*
3428 	 * When we are idle, drop to the minimum voltage state.
3429 */
3430
3431 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
3432 return;
3433
3434 	/* Mask turbo interrupts so that none arrive in the middle of the sequence */
3435 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3436
3437 vlv_force_gfx_clock(dev_priv, true);
3438
3439 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
3440
3441 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3442 dev_priv->rps.min_freq_softlimit);
3443
3444 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3445 & GENFREQSTATUS) == 0, 5))
3446 DRM_ERROR("timed out waiting for Punit\n");
3447
3448 vlv_force_gfx_clock(dev_priv, false);
3449
3450 I915_WRITE(GEN6_PMINTRMSK,
3451 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3452 }
3453
3454 void gen6_rps_idle(struct drm_i915_private *dev_priv)
3455 {
3456 struct drm_device *dev = dev_priv->dev;
3457
3458 mutex_lock(&dev_priv->rps.hw_lock);
3459 if (dev_priv->rps.enabled) {
3460 if (IS_CHERRYVIEW(dev))
3461 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3462 else if (IS_VALLEYVIEW(dev))
3463 vlv_set_rps_idle(dev_priv);
3464 else
3465 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3466 dev_priv->rps.last_adj = 0;
3467 }
3468 mutex_unlock(&dev_priv->rps.hw_lock);
3469 }
3470
3471 void gen6_rps_boost(struct drm_i915_private *dev_priv)
3472 {
3473 struct drm_device *dev = dev_priv->dev;
3474
3475 mutex_lock(&dev_priv->rps.hw_lock);
3476 if (dev_priv->rps.enabled) {
3477 if (IS_VALLEYVIEW(dev))
3478 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3479 else
3480 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3481 dev_priv->rps.last_adj = 0;
3482 }
3483 mutex_unlock(&dev_priv->rps.hw_lock);
3484 }
3485
3486 void valleyview_set_rps(struct drm_device *dev, u8 val)
3487 {
3488 struct drm_i915_private *dev_priv = dev->dev_private;
3489
3490 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3491 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3492 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3493
3494 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
3495 "Odd GPU freq value\n"))
3496 val &= ~1;
3497
3498 if (val != dev_priv->rps.cur_freq) {
3499 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3500 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3501 dev_priv->rps.cur_freq,
3502 vlv_gpu_freq(dev_priv, val), val);
3503
3504 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3505 }
3506
3507 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3508
3509 dev_priv->rps.cur_freq = val;
3510 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3511 }
3512
3513 static void gen8_disable_rps_interrupts(struct drm_device *dev)
3514 {
3515 struct drm_i915_private *dev_priv = dev->dev_private;
3516
3517 I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
3518 I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
3519 ~dev_priv->pm_rps_events);
3520 /* Masking all PM interrupts here doesn't race with the rps work
3521 * item re-unmasking them, because the work item uses a different
3522 * register (GEN8_GT_IMR(2)) to do its masking. The only risk is
3523 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2), which
3524 * gen8_enable_rps will clean up. */
3525
3526 spin_lock_irq(&dev_priv->irq_lock);
3527 dev_priv->rps.pm_iir = 0;
3528 spin_unlock_irq(&dev_priv->irq_lock);
3529
3530 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3531 }
3532
3533 static void gen6_disable_rps_interrupts(struct drm_device *dev)
3534 {
3535 struct drm_i915_private *dev_priv = dev->dev_private;
3536
3537 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3538 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3539 ~dev_priv->pm_rps_events);
3540 /* Masking all PM interrupts here doesn't race with the rps work
3541 * item re-unmasking them, because the work item uses a different
3542 * register (PMIMR) to do its masking. The only risk is leaving
3543 * stale bits in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
3544
3545 spin_lock_irq(&dev_priv->irq_lock);
3546 dev_priv->rps.pm_iir = 0;
3547 spin_unlock_irq(&dev_priv->irq_lock);
3548
3549 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3550 }
3551
3552 static void gen6_disable_rps(struct drm_device *dev)
3553 {
3554 struct drm_i915_private *dev_priv = dev->dev_private;
3555
3556 I915_WRITE(GEN6_RC_CONTROL, 0);
3557 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3558
3559 if (IS_BROADWELL(dev))
3560 gen8_disable_rps_interrupts(dev);
3561 else
3562 gen6_disable_rps_interrupts(dev);
3563 }
3564
3565 static void cherryview_disable_rps(struct drm_device *dev)
3566 {
3567 struct drm_i915_private *dev_priv = dev->dev_private;
3568
3569 I915_WRITE(GEN6_RC_CONTROL, 0);
3570
3571 gen8_disable_rps_interrupts(dev);
3572 }
3573
3574 static void valleyview_disable_rps(struct drm_device *dev)
3575 {
3576 struct drm_i915_private *dev_priv = dev->dev_private;
3577
3578 /* We grab forcewake before disabling RC6; this is what the BIOS
3579 * expects when going into suspend */
3580 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3581
3582 I915_WRITE(GEN6_RC_CONTROL, 0);
3583
3584 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3585
3586 gen6_disable_rps_interrupts(dev);
3587 }
3588
3589 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3590 {
3591 if (IS_VALLEYVIEW(dev)) {
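/* On VLV the RC6 enable is encoded via the TO/EI mode bits rather than
 * GEN6_RC_CTL_RC6_ENABLE (see the rc6_mode setup in the enable paths),
 * so translate it here for the common debug print below. */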
3592 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
3593 mode = GEN6_RC_CTL_RC6_ENABLE;
3594 else
3595 mode = 0;
3596 }
3597 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3598 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3599 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3600 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3601 }
3602
3603 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
3604 {
3605 /* No RC6 before Ironlake */
3606 if (INTEL_INFO(dev)->gen < 5)
3607 return 0;
3608
3609 /* RC6 is only available on Ironlake mobile, not on desktop */
3610 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
3611 return 0;
3612
3613 /* Respect the kernel parameter if it is set */
3614 if (enable_rc6 >= 0) {
3615 int mask;
3616
3617 if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
3618 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
3619 INTEL_RC6pp_ENABLE;
3620 else
3621 mask = INTEL_RC6_ENABLE;
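/* e.g. i915.enable_rc6=7 on Haswell is clamped to plain RC6 (1) below,
 * since the deeper RC6p/RC6pp states are only advertised for gen6 and
 * Ivybridge (illustrative example). */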
3622
3623 if ((enable_rc6 & mask) != enable_rc6)
3624 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
3625 enable_rc6 & mask, enable_rc6, mask);
3626
3627 return enable_rc6 & mask;
3628 }
3629
3630 /* Disable RC6 on Ironlake */
3631 if (INTEL_INFO(dev)->gen == 5)
3632 return 0;
3633
3634 if (IS_IVYBRIDGE(dev))
3635 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3636
3637 return INTEL_RC6_ENABLE;
3638 }
3639
3640 int intel_enable_rc6(const struct drm_device *dev)
3641 {
3642 return i915.enable_rc6;
3643 }
3644
3645 static void gen8_enable_rps_interrupts(struct drm_device *dev)
3646 {
3647 struct drm_i915_private *dev_priv = dev->dev_private;
3648
3649 spin_lock_irq(&dev_priv->irq_lock);
3650 WARN_ON(dev_priv->rps.pm_iir);
3651 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3652 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3653 spin_unlock_irq(&dev_priv->irq_lock);
3654 }
3655
3656 static void gen6_enable_rps_interrupts(struct drm_device *dev)
3657 {
3658 struct drm_i915_private *dev_priv = dev->dev_private;
3659
3660 spin_lock_irq(&dev_priv->irq_lock);
3661 WARN_ON(dev_priv->rps.pm_iir);
3662 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3663 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3664 spin_unlock_irq(&dev_priv->irq_lock);
3665 }
3666
3667 static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
3668 {
3669 /* All of these values are in units of 50MHz */
3670 dev_priv->rps.cur_freq = 0;
3671 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
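/* Illustrative decode (not a real readout): rp_state_cap = 0x0b1122
 * gives RP0 = 0x22 (1700 MHz), RP1 = 0x11 (850 MHz) and RPn = 0x0b
 * (550 MHz) at 50 MHz per unit. */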
3672 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
3673 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
3674 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
3675 /* XXX: only BYT has a special efficient freq */
3676 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
3677 /* hw_max = RP0 until we check for overclocking */
3678 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3679
3680 /* Preserve min/max settings in case of re-init */
3681 if (dev_priv->rps.max_freq_softlimit == 0)
3682 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3683
3684 if (dev_priv->rps.min_freq_softlimit == 0)
3685 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3686 }
3687
3688 static void gen8_enable_rps(struct drm_device *dev)
3689 {
3690 struct drm_i915_private *dev_priv = dev->dev_private;
3691 struct intel_engine_cs *ring;
3692 uint32_t rc6_mask = 0, rp_state_cap;
3693 int unused;
3694
3695 /* 1a: Software RC state - RC0 */
3696 I915_WRITE(GEN6_RC_STATE, 0);
3697
3698 /* 1c & 1d: Get forcewake during program sequence. Although the driver
3699 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
3700 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3701
3702 /* 2a: Disable RC states. */
3703 I915_WRITE(GEN6_RC_CONTROL, 0);
3704
3705 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3706 parse_rp_state_cap(dev_priv, rp_state_cap);
3707
3708 /* 2b: Program RC6 thresholds.*/
3709 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3710 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 125000 * 1280ns = 160ms */
3711 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3712 for_each_ring(ring, dev_priv, unused)
3713 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3714 I915_WRITE(GEN6_RC_SLEEP, 0);
3715 if (IS_BROADWELL(dev))
3716 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
3717 else
3718 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3719
3720 /* 3: Enable RC6 */
3721 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3722 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3723 intel_print_rc6_info(dev, rc6_mask);
3724 if (IS_BROADWELL(dev))
3725 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3726 GEN7_RC_CTL_TO_MODE |
3727 rc6_mask);
3728 else
3729 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3730 GEN6_RC_CTL_EI_MODE(1) |
3731 rc6_mask);
3732
3733 /* 4: Program defaults and thresholds for RPS */
3734 I915_WRITE(GEN6_RPNSWREQ,
3735 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3736 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3737 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3738 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3739 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
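/* 100000000 / 128 = 781250; at the 1.28us per unit assumed by the
 * neighbouring comments this is exactly 1s. */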
3740
3741 /* Docs recommend 900MHz, and 300 MHz respectively */
3742 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3743 dev_priv->rps.max_freq_softlimit << 24 |
3744 dev_priv->rps.min_freq_softlimit << 16);
3745
3746 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3747 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
3748 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
3749 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
3750
3751 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3752
3753 /* 5: Enable RPS */
3754 I915_WRITE(GEN6_RP_CONTROL,
3755 GEN6_RP_MEDIA_TURBO |
3756 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3757 GEN6_RP_MEDIA_IS_GFX |
3758 GEN6_RP_ENABLE |
3759 GEN6_RP_UP_BUSY_AVG |
3760 GEN6_RP_DOWN_IDLE_AVG);
3761
3762 /* 6: Ring frequency + overclocking (our driver does this later) */
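/* The current ratio is reported in bits 15:8 of GT_PERF_STATUS, hence
 * the 0xff00 mask and the >> 8 below. */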
3763
3764 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3765
3766 gen8_enable_rps_interrupts(dev);
3767
3768 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3769 }
3770
3771 static void gen6_enable_rps(struct drm_device *dev)
3772 {
3773 struct drm_i915_private *dev_priv = dev->dev_private;
3774 struct intel_engine_cs *ring;
3775 u32 rp_state_cap;
3776 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3777 u32 gtfifodbg;
3778 int rc6_mode;
3779 int i, ret;
3780
3781 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3782
3783 /* Here begins a magic sequence of register writes to enable
3784 * auto-downclocking.
3785 *
3786 * There might be some value in exposing these to
3787 * userspace...
3788 */
3789 I915_WRITE(GEN6_RC_STATE, 0);
3790
3791 /* Clear the DBG register now so we don't mistake earlier errors for new ones */
3792 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3793 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3794 I915_WRITE(GTFIFODBG, gtfifodbg);
3795 }
3796
3797 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3798
3799 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3800
3801 parse_rp_state_cap(dev_priv, rp_state_cap);
3802
3803 /* disable the counters and set deterministic thresholds */
3804 I915_WRITE(GEN6_RC_CONTROL, 0);
3805
3806 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3807 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3808 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3809 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3810 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3811
3812 for_each_ring(ring, dev_priv, i)
3813 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3814
3815 I915_WRITE(GEN6_RC_SLEEP, 0);
3816 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3817 if (IS_IVYBRIDGE(dev))
3818 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3819 else
3820 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3821 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3822 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3823
3824 /* Check if we are enabling RC6 */
3825 rc6_mode = intel_enable_rc6(dev_priv->dev);
3826 if (rc6_mode & INTEL_RC6_ENABLE)
3827 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3828
3829 /* We don't use those on Haswell */
3830 if (!IS_HASWELL(dev)) {
3831 if (rc6_mode & INTEL_RC6p_ENABLE)
3832 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3833
3834 if (rc6_mode & INTEL_RC6pp_ENABLE)
3835 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3836 }
3837
3838 intel_print_rc6_info(dev, rc6_mask);
3839
3840 I915_WRITE(GEN6_RC_CONTROL,
3841 rc6_mask |
3842 GEN6_RC_CTL_EI_MODE(1) |
3843 GEN6_RC_CTL_HW_ENABLE);
3844
3845 /* Power down if completely idle for over 50ms */
3846 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3847 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3848
3849 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3850 if (ret)
3851 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3852
3853 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3854 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3855 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3856 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3857 (pcu_mbox & 0xff) * 50);
3858 dev_priv->rps.max_freq = pcu_mbox & 0xff;
3859 }
3860
3861 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3862 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3863
3864 gen6_enable_rps_interrupts(dev);
3865
3866 rc6vids = 0;
3867 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3868 if (IS_GEN6(dev) && ret) {
3869 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3870 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3871 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3872 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3873 rc6vids &= 0xffff00;
3874 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3875 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3876 if (ret)
3877 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3878 }
3879
3880 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3881 }
3882
3883 static void __gen6_update_ring_freq(struct drm_device *dev)
3884 {
3885 struct drm_i915_private *dev_priv = dev->dev_private;
3886 int min_freq = 15;
3887 unsigned int gpu_freq;
3888 unsigned int max_ia_freq, min_ring_freq;
3889 int scaling_factor = 180;
3890 struct cpufreq_policy *policy;
3891
3892 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3893
3894 policy = cpufreq_cpu_get(0);
3895 if (policy) {
3896 max_ia_freq = policy->cpuinfo.max_freq;
3897 cpufreq_cpu_put(policy);
3898 } else {
3899 /*
3900 * Default to measured freq if none found; the PCU will ensure we
3901 * don't go over
3902 */
3903 max_ia_freq = tsc_khz;
3904 }
3905
3906 /* Convert from kHz to MHz */
3907 max_ia_freq /= 1000;
3908
3909 min_ring_freq = I915_READ(DCLK) & 0xf;
3910 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3911 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3912
3913 /*
3914 * For each potential GPU frequency, load a ring frequency we'd like
3915 * to use for memory access. We do this by specifying the IA frequency
3916 * the PCU should use as a reference to determine the ring frequency.
3917 */
3918 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
3919 gpu_freq--) {
3920 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
3921 unsigned int ia_freq = 0, ring_freq = 0;
3922
3923 if (INTEL_INFO(dev)->gen >= 8) {
3924 /* max(2 * GT, DDR). NB: GT is 50MHz units */
3925 ring_freq = max(min_ring_freq, gpu_freq);
3926 } else if (IS_HASWELL(dev)) {
3927 ring_freq = mult_frac(gpu_freq, 5, 4);
3928 ring_freq = max(min_ring_freq, ring_freq);
3929 /* leave ia_freq as the default, chosen by cpufreq */
3930 } else {
3931 /* On older processors, there is no separate ring
3932 * clock domain, so in order to boost the bandwidth
3933 * of the ring, we need to upclock the CPU (ia_freq).
3934 *
3935 * For GPU frequencies less than 750MHz,
3936 * just use the lowest ring freq.
3937 */
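/* Worked example for the gpu_freq >= min_freq case, with illustrative
 * numbers: max_ia_freq = 3400 MHz and diff = 4 give
 * ia_freq = 3400 - (4 * 180) / 2 = 3040, which DIV_ROUND_CLOSEST(, 100)
 * turns into 30, i.e. units of 100 MHz. */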
3938 if (gpu_freq < min_freq)
3939 ia_freq = 800;
3940 else
3941 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3942 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3943 }
3944
3945 sandybridge_pcode_write(dev_priv,
3946 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3947 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3948 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3949 gpu_freq);
3950 }
3951 }
3952
3953 void gen6_update_ring_freq(struct drm_device *dev)
3954 {
3955 struct drm_i915_private *dev_priv = dev->dev_private;
3956
3957 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
3958 return;
3959
3960 mutex_lock(&dev_priv->rps.hw_lock);
3961 __gen6_update_ring_freq(dev);
3962 mutex_unlock(&dev_priv->rps.hw_lock);
3963 }
3964
3965 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
3966 {
3967 u32 val, rp0;
3968
3969 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
3970 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3971
3972 return rp0;
3973 }
3974
3975 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3976 {
3977 u32 val, rpe;
3978
3979 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
3980 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
3981
3982 return rpe;
3983 }
3984
3985 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
3986 {
3987 u32 val, rp1;
3988
3989 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3990 rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3991
3992 return rp1;
3993 }
3994
3995 static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
3996 {
3997 u32 val, rpn;
3998
3999 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
4000 rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
4001 return rpn;
4002 }
4003
4004 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
4005 {
4006 u32 val, rp1;
4007
4008 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4009
4010 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
4011
4012 return rp1;
4013 }
4014
4015 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
4016 {
4017 u32 val, rp0;
4018
4019 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
4020
4021 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
4022 /* Clamp to max */
4023 rp0 = min_t(u32, rp0, 0xea);
4024
4025 return rp0;
4026 }
4027
4028 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
4029 {
4030 u32 val, rpe;
4031
4032 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
4033 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
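/* The RPe fuse appears to be split across two registers: the low five
 * bits read above, and the remaining high bits below (hence the << 5). */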
4034 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
4035 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
4036
4037 return rpe;
4038 }
4039
4040 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
4041 {
4042 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
4043 }
4044
4045 /* Check that the pctx buffer wasn't moved under us. */
4046 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
4047 {
4048 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4049
4050 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
4051 dev_priv->vlv_pctx->stolen->start);
4052 }
4053
4054
4055 /* Check that the pcbr address is not empty. */
4056 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
4057 {
4058 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
4059
4060 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
4061 }
4062
4063 static void cherryview_setup_pctx(struct drm_device *dev)
4064 {
4065 struct drm_i915_private *dev_priv = dev->dev_private;
4066 unsigned long pctx_paddr, paddr;
4067 struct i915_gtt *gtt = &dev_priv->gtt;
4068 u32 pcbr;
4069 int pctx_size = 32*1024;
4070
4071 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4072
4073 pcbr = I915_READ(VLV_PCBR);
4074 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
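/* BIOS left PCBR unset, so carve the 32KiB power context out of the
 * top of stolen memory ourselves. */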
4075 paddr = (dev_priv->mm.stolen_base +
4076 (gtt->stolen_size - pctx_size));
4077
4078 pctx_paddr = (paddr & (~4095));
4079 I915_WRITE(VLV_PCBR, pctx_paddr);
4080 }
4081 }
4082
4083 static void valleyview_setup_pctx(struct drm_device *dev)
4084 {
4085 struct drm_i915_private *dev_priv = dev->dev_private;
4086 struct drm_i915_gem_object *pctx;
4087 unsigned long pctx_paddr;
4088 u32 pcbr;
4089 int pctx_size = 24*1024;
4090
4091 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4092
4093 pcbr = I915_READ(VLV_PCBR);
4094 if (pcbr) {
4095 /* BIOS set it up already, grab the pre-alloc'd space */
4096 int pcbr_offset;
4097
4098 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
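/* PCBR holds a (presumably 4KiB-aligned) physical address; masking off
 * the low 12 bits and rebasing against stolen_base yields the offset
 * into stolen memory used below. */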
4099 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
4100 pcbr_offset,
4101 I915_GTT_OFFSET_NONE,
4102 pctx_size);
4103 goto out;
4104 }
4105
4106 /*
4107 * From the Gunit register HAS:
4108 * The Gfx driver is expected to program this register and ensure
4109 * proper allocation within Gfx stolen memory. For example, this
4110 * register should be programmed such that the PCBR range does not
4111 * overlap with other ranges, such as the frame buffer, protected
4112 * memory, or any other relevant ranges.
4113 */
4114 pctx = i915_gem_object_create_stolen(dev, pctx_size);
4115 if (!pctx) {
4116 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
4117 return;
4118 }
4119
4120 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
4121 I915_WRITE(VLV_PCBR, pctx_paddr);
4122
4123 out:
4124 dev_priv->vlv_pctx = pctx;
4125 }
4126
4127 static void valleyview_cleanup_pctx(struct drm_device *dev)
4128 {
4129 struct drm_i915_private *dev_priv = dev->dev_private;
4130
4131 if (WARN_ON(!dev_priv->vlv_pctx))
4132 return;
4133
4134 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
4135 dev_priv->vlv_pctx = NULL;
4136 }
4137
4138 static void valleyview_init_gt_powersave(struct drm_device *dev)
4139 {
4140 struct drm_i915_private *dev_priv = dev->dev_private;
4141 u32 val;
4142
4143 valleyview_setup_pctx(dev);
4144
4145 mutex_lock(&dev_priv->rps.hw_lock);
4146
4147 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4148 switch ((val >> 6) & 3) {
4149 case 0:
4150 case 1:
4151 dev_priv->mem_freq = 800;
4152 break;
4153 case 2:
4154 dev_priv->mem_freq = 1066;
4155 break;
4156 case 3:
4157 dev_priv->mem_freq = 1333;
4158 break;
4159 }
4160 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
4161
4162 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
4163 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4164 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4165 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4166 dev_priv->rps.max_freq);
4167
4168 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
4169 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4170 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4171 dev_priv->rps.efficient_freq);
4172
4173 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
4174 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
4175 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4176 dev_priv->rps.rp1_freq);
4177
4178 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
4179 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4180 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4181 dev_priv->rps.min_freq);
4182
4183 /* Preserve min/max settings in case of re-init */
4184 if (dev_priv->rps.max_freq_softlimit == 0)
4185 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4186
4187 if (dev_priv->rps.min_freq_softlimit == 0)
4188 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4189
4190 mutex_unlock(&dev_priv->rps.hw_lock);
4191 }
4192
4193 static void cherryview_init_gt_powersave(struct drm_device *dev)
4194 {
4195 struct drm_i915_private *dev_priv = dev->dev_private;
4196 u32 val;
4197
4198 cherryview_setup_pctx(dev);
4199
4200 mutex_lock(&dev_priv->rps.hw_lock);
4201
4202 val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
4203 switch ((val >> 2) & 0x7) {
4204 case 0:
4205 case 1:
4206 dev_priv->rps.cz_freq = 200;
4207 dev_priv->mem_freq = 1600;
4208 break;
4209 case 2:
4210 dev_priv->rps.cz_freq = 267;
4211 dev_priv->mem_freq = 1600;
4212 break;
4213 case 3:
4214 dev_priv->rps.cz_freq = 333;
4215 dev_priv->mem_freq = 2000;
4216 break;
4217 case 4:
4218 dev_priv->rps.cz_freq = 320;
4219 dev_priv->mem_freq = 1600;
4220 break;
4221 case 5:
4222 dev_priv->rps.cz_freq = 400;
4223 dev_priv->mem_freq = 1600;
4224 break;
4225 }
4226 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
4227
4228 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
4229 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4230 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4231 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
4232 dev_priv->rps.max_freq);
4233
4234 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
4235 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
4236 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4237 dev_priv->rps.efficient_freq);
4238
4239 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
4240 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
4241 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
4242 dev_priv->rps.rp1_freq);
4243
4244 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
4245 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4246 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4247 dev_priv->rps.min_freq);
4248
4249 WARN_ONCE((dev_priv->rps.max_freq |
4250 dev_priv->rps.efficient_freq |
4251 dev_priv->rps.rp1_freq |
4252 dev_priv->rps.min_freq) & 1,
4253 "Odd GPU freq values\n");
4254
4255 /* Preserve min/max settings in case of re-init */
4256 if (dev_priv->rps.max_freq_softlimit == 0)
4257 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4258
4259 if (dev_priv->rps.min_freq_softlimit == 0)
4260 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4261
4262 mutex_unlock(&dev_priv->rps.hw_lock);
4263 }
4264
4265 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
4266 {
4267 valleyview_cleanup_pctx(dev);
4268 }
4269
4270 static void cherryview_enable_rps(struct drm_device *dev)
4271 {
4272 struct drm_i915_private *dev_priv = dev->dev_private;
4273 struct intel_engine_cs *ring;
4274 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
4275 int i;
4276
4277 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4278
4279 gtfifodbg = I915_READ(GTFIFODBG);
4280 if (gtfifodbg) {
4281 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4282 gtfifodbg);
4283 I915_WRITE(GTFIFODBG, gtfifodbg);
4284 }
4285
4286 cherryview_check_pctx(dev_priv);
4287
4288 /* 1a & 1b: Get forcewake during program sequence. Although the driver
4289 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
4290 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4291
4292 /* 2a: Program RC6 thresholds.*/
4293 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4294 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 125000 * 1280ns = 160ms */
4295 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4296
4297 for_each_ring(ring, dev_priv, i)
4298 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4299 I915_WRITE(GEN6_RC_SLEEP, 0);
4300
4301 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4302
4303 /* allows RC6 residency counter to work */
4304 I915_WRITE(VLV_COUNTER_CONTROL,
4305 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
4306 VLV_MEDIA_RC6_COUNT_EN |
4307 VLV_RENDER_RC6_COUNT_EN));
4308
4309 /* For now we assume BIOS is allocating and populating the PCBR */
4310 pcbr = I915_READ(VLV_PCBR);
4311
4312 DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
4313
4314 /* 3: Enable RC6 */
4315 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
4316 (pcbr >> VLV_PCBR_ADDR_SHIFT))
4317 rc6_mode = GEN6_RC_CTL_EI_MODE(1);
4318
4319 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4320
4321 /* 4: Program defaults and thresholds for RPS */
4322 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4323 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4324 I915_WRITE(GEN6_RP_UP_EI, 66000);
4325 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4326
4327 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4328
4329 /* WaDisablePwrmtrEvent:chv (pre-production hw) */
4330 I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
4331 I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
4332
4333 /* 5: Enable RPS */
4334 I915_WRITE(GEN6_RP_CONTROL,
4335 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4336 GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
4337 GEN6_RP_ENABLE |
4338 GEN6_RP_UP_BUSY_AVG |
4339 GEN6_RP_DOWN_IDLE_AVG);
4340
4341 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4342
4343 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4344 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4345
4346 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4347 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4348 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4349 dev_priv->rps.cur_freq);
4350
4351 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4352 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4353 dev_priv->rps.efficient_freq);
4354
4355 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4356
4357 gen8_enable_rps_interrupts(dev);
4358
4359 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4360 }
4361
4362 static void valleyview_enable_rps(struct drm_device *dev)
4363 {
4364 struct drm_i915_private *dev_priv = dev->dev_private;
4365 struct intel_engine_cs *ring;
4366 u32 gtfifodbg, val, rc6_mode = 0;
4367 int i;
4368
4369 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4370
4371 valleyview_check_pctx(dev_priv);
4372
4373 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4374 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4375 gtfifodbg);
4376 I915_WRITE(GTFIFODBG, gtfifodbg);
4377 }
4378
4379 /* On VLV, forcewake all wells; otherwise this redirects to the regular path */
4380 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4381
4382 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4383 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4384 I915_WRITE(GEN6_RP_UP_EI, 66000);
4385 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4386
4387 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4388 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
4389
4390 I915_WRITE(GEN6_RP_CONTROL,
4391 GEN6_RP_MEDIA_TURBO |
4392 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4393 GEN6_RP_MEDIA_IS_GFX |
4394 GEN6_RP_ENABLE |
4395 GEN6_RP_UP_BUSY_AVG |
4396 GEN6_RP_DOWN_IDLE_CONT);
4397
4398 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
4399 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4400 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4401
4402 for_each_ring(ring, dev_priv, i)
4403 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4404
4405 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
4406
4407 /* allows RC6 residency counter to work */
4408 I915_WRITE(VLV_COUNTER_CONTROL,
4409 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
4410 VLV_RENDER_RC0_COUNT_EN |
4411 VLV_MEDIA_RC6_COUNT_EN |
4412 VLV_RENDER_RC6_COUNT_EN));
4413
4414 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4415 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
4416
4417 intel_print_rc6_info(dev, rc6_mode);
4418
4419 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4420
4421 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4422
4423 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4424 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4425
4426 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4427 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4428 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4429 dev_priv->rps.cur_freq);
4430
4431 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4432 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4433 dev_priv->rps.efficient_freq);
4434
4435 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4436
4437 gen6_enable_rps_interrupts(dev);
4438
4439 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4440 }
4441
4442 void ironlake_teardown_rc6(struct drm_device *dev)
4443 {
4444 struct drm_i915_private *dev_priv = dev->dev_private;
4445
4446 if (dev_priv->ips.renderctx) {
4447 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
4448 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
4449 dev_priv->ips.renderctx = NULL;
4450 }
4451
4452 if (dev_priv->ips.pwrctx) {
4453 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
4454 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
4455 dev_priv->ips.pwrctx = NULL;
4456 }
4457 }
4458
4459 static void ironlake_disable_rc6(struct drm_device *dev)
4460 {
4461 struct drm_i915_private *dev_priv = dev->dev_private;
4462
4463 if (I915_READ(PWRCTXA)) {
4464 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
4465 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
4466 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
4467 50);
4468
4469 I915_WRITE(PWRCTXA, 0);
4470 POSTING_READ(PWRCTXA);
4471
4472 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4473 POSTING_READ(RSTDBYCTL);
4474 }
4475 }
4476
4477 static int ironlake_setup_rc6(struct drm_device *dev)
4478 {
4479 struct drm_i915_private *dev_priv = dev->dev_private;
4480
4481 if (dev_priv->ips.renderctx == NULL)
4482 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
4483 if (!dev_priv->ips.renderctx)
4484 return -ENOMEM;
4485
4486 if (dev_priv->ips.pwrctx == NULL)
4487 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
4488 if (!dev_priv->ips.pwrctx) {
4489 ironlake_teardown_rc6(dev);
4490 return -ENOMEM;
4491 }
4492
4493 return 0;
4494 }
4495
4496 static void ironlake_enable_rc6(struct drm_device *dev)
4497 {
4498 struct drm_i915_private *dev_priv = dev->dev_private;
4499 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
4500 bool was_interruptible;
4501 int ret;
4502
4503 /* rc6 disabled by default due to repeated reports of hanging during
4504 * boot and resume.
4505 */
4506 if (!intel_enable_rc6(dev))
4507 return;
4508
4509 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4510
4511 ret = ironlake_setup_rc6(dev);
4512 if (ret)
4513 return;
4514
4515 was_interruptible = dev_priv->mm.interruptible;
4516 dev_priv->mm.interruptible = false;
4517
4518 /*
4519 * GPU can automatically power down the render unit if given a page
4520 * to save state.
4521 */
4522 ret = intel_ring_begin(ring, 6);
4523 if (ret) {
4524 ironlake_teardown_rc6(dev);
4525 dev_priv->mm.interruptible = was_interruptible;
4526 return;
4527 }
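/* Six dwords follow, matching intel_ring_begin(ring, 6) above:
 * suspend-flush enable, MI_SET_CONTEXT plus its target address and
 * flags, suspend-flush disable, a NOOP pad and a final MI_FLUSH. */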
4528
4529 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
4530 intel_ring_emit(ring, MI_SET_CONTEXT);
4531 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
4532 MI_MM_SPACE_GTT |
4533 MI_SAVE_EXT_STATE_EN |
4534 MI_RESTORE_EXT_STATE_EN |
4535 MI_RESTORE_INHIBIT);
4536 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
4537 intel_ring_emit(ring, MI_NOOP);
4538 intel_ring_emit(ring, MI_FLUSH);
4539 intel_ring_advance(ring);
4540
4541 /*
4542 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
4543 * does an implicit flush; combined with the MI_FLUSH above, it should
4544 * be safe to assume that renderctx is valid.
4545 */
4546 ret = intel_ring_idle(ring);
4547 dev_priv->mm.interruptible = was_interruptible;
4548 if (ret) {
4549 DRM_ERROR("failed to enable ironlake power savings\n");
4550 ironlake_teardown_rc6(dev);
4551 return;
4552 }
4553
4554 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
4555 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4556
4557 intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
4558 }
4559
4560 static unsigned long intel_pxfreq(u32 vidfreq)
4561 {
4562 unsigned long freq;
4563 int div = (vidfreq & 0x3f0000) >> 16;
4564 int post = (vidfreq & 0x3000) >> 12;
4565 int pre = (vidfreq & 0x7);
4566
4567 if (!pre)
4568 return 0;
4569
4570 freq = ((div * 133333) / ((1<<post) * pre));
4571
4572 return freq;
4573 }
4574
4575 static const struct cparams {
4576 u16 i;
4577 u16 t;
4578 u16 m;
4579 u16 c;
4580 } cparams[] = {
4581 { 1, 1333, 301, 28664 },
4582 { 1, 1066, 294, 24460 },
4583 { 1, 800, 294, 25192 },
4584 { 0, 1333, 276, 27605 },
4585 { 0, 1066, 276, 27605 },
4586 { 0, 800, 231, 23784 },
4587 };
4588
4589 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
4590 {
4591 u64 total_count, diff, ret;
4592 u32 count1, count2, count3, m = 0, c = 0;
4593 unsigned long now = jiffies_to_msecs(jiffies), diff1;
4594 int i;
4595
4596 assert_spin_locked(&mchdev_lock);
4597
4598 diff1 = now - dev_priv->ips.last_time1;
4599
4600 /* Prevent division-by-zero if we are asking too fast.
4601 * Also, we don't get interesting results if we are polling
4602 * faster than once in 10ms, so just return the saved value
4603 * in such cases.
4604 */
4605 if (diff1 <= 10)
4606 return dev_priv->ips.chipset_power;
4607
4608 count1 = I915_READ(DMIEC);
4609 count2 = I915_READ(DDREC);
4610 count3 = I915_READ(CSIEC);
4611
4612 total_count = count1 + count2 + count3;
4613
4614 /* FIXME: handle per-counter overflow */
4615 if (total_count < dev_priv->ips.last_count1) {
4616 diff = ~0UL - dev_priv->ips.last_count1;
4617 diff += total_count;
4618 } else {
4619 diff = total_count - dev_priv->ips.last_count1;
4620 }
4621
4622 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
4623 if (cparams[i].i == dev_priv->ips.c_m &&
4624 cparams[i].t == dev_priv->ips.r_t) {
4625 m = cparams[i].m;
4626 c = cparams[i].c;
4627 break;
4628 }
4629 }
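/* Average the counter delta over the elapsed milliseconds, then apply
 * the linear fit (m, c) from cparams; the final divide by 10 keeps the
 * result in the units the rest of IPS expects (assumed scaling). */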
4630
4631 diff = div_u64(diff, diff1);
4632 ret = ((m * diff) + c);
4633 ret = div_u64(ret, 10);
4634
4635 dev_priv->ips.last_count1 = total_count;
4636 dev_priv->ips.last_time1 = now;
4637
4638 dev_priv->ips.chipset_power = ret;
4639
4640 return ret;
4641 }
4642
4643 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
4644 {
4645 struct drm_device *dev = dev_priv->dev;
4646 unsigned long val;
4647
4648 if (INTEL_INFO(dev)->gen != 5)
4649 return 0;
4650
4651 spin_lock_irq(&mchdev_lock);
4652
4653 val = __i915_chipset_val(dev_priv);
4654
4655 spin_unlock_irq(&mchdev_lock);
4656
4657 return val;
4658 }
4659
4660 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
4661 {
4662 unsigned long m, x, b;
4663 u32 tsfs;
4664
4665 tsfs = I915_READ(TSFS);
4666
4667 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
4668 x = I915_READ8(TR1);
4669
4670 b = tsfs & TSFS_INTR_MASK;
4671
4672 return ((m * x) / 127) - b;
4673 }
4674
4675 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4676 {
4677 struct drm_device *dev = dev_priv->dev;
4678 static const struct v_table {
4679 u16 vd; /* in .1 mil */
4680 u16 vm; /* in .1 mil */
4681 } v_table[] = {
4682 { 0, 0, },
4683 { 375, 0, },
4684 { 500, 0, },
4685 { 625, 0, },
4686 { 750, 0, },
4687 { 875, 0, },
4688 { 1000, 0, },
4689 { 1125, 0, },
4690 { 4125, 3000, },
4691 { 4125, 3000, },
4692 { 4125, 3000, },
4693 { 4125, 3000, },
4694 { 4125, 3000, },
4695 { 4125, 3000, },
4696 { 4125, 3000, },
4697 { 4125, 3000, },
4698 { 4125, 3000, },
4699 { 4125, 3000, },
4700 { 4125, 3000, },
4701 { 4125, 3000, },
4702 { 4125, 3000, },
4703 { 4125, 3000, },
4704 { 4125, 3000, },
4705 { 4125, 3000, },
4706 { 4125, 3000, },
4707 { 4125, 3000, },
4708 { 4125, 3000, },
4709 { 4125, 3000, },
4710 { 4125, 3000, },
4711 { 4125, 3000, },
4712 { 4125, 3000, },
4713 { 4125, 3000, },
4714 { 4250, 3125, },
4715 { 4375, 3250, },
4716 { 4500, 3375, },
4717 { 4625, 3500, },
4718 { 4750, 3625, },
4719 { 4875, 3750, },
4720 { 5000, 3875, },
4721 { 5125, 4000, },
4722 { 5250, 4125, },
4723 { 5375, 4250, },
4724 { 5500, 4375, },
4725 { 5625, 4500, },
4726 { 5750, 4625, },
4727 { 5875, 4750, },
4728 { 6000, 4875, },
4729 { 6125, 5000, },
4730 { 6250, 5125, },
4731 { 6375, 5250, },
4732 { 6500, 5375, },
4733 { 6625, 5500, },
4734 { 6750, 5625, },
4735 { 6875, 5750, },
4736 { 7000, 5875, },
4737 { 7125, 6000, },
4738 { 7250, 6125, },
4739 { 7375, 6250, },
4740 { 7500, 6375, },
4741 { 7625, 6500, },
4742 { 7750, 6625, },
4743 { 7875, 6750, },
4744 { 8000, 6875, },
4745 { 8125, 7000, },
4746 { 8250, 7125, },
4747 { 8375, 7250, },
4748 { 8500, 7375, },
4749 { 8625, 7500, },
4750 { 8750, 7625, },
4751 { 8875, 7750, },
4752 { 9000, 7875, },
4753 { 9125, 8000, },
4754 { 9250, 8125, },
4755 { 9375, 8250, },
4756 { 9500, 8375, },
4757 { 9625, 8500, },
4758 { 9750, 8625, },
4759 { 9875, 8750, },
4760 { 10000, 8875, },
4761 { 10125, 9000, },
4762 { 10250, 9125, },
4763 { 10375, 9250, },
4764 { 10500, 9375, },
4765 { 10625, 9500, },
4766 { 10750, 9625, },
4767 { 10875, 9750, },
4768 { 11000, 9875, },
4769 { 11125, 10000, },
4770 { 11250, 10125, },
4771 { 11375, 10250, },
4772 { 11500, 10375, },
4773 { 11625, 10500, },
4774 { 11750, 10625, },
4775 { 11875, 10750, },
4776 { 12000, 10875, },
4777 { 12125, 11000, },
4778 { 12250, 11125, },
4779 { 12375, 11250, },
4780 { 12500, 11375, },
4781 { 12625, 11500, },
4782 { 12750, 11625, },
4783 { 12875, 11750, },
4784 { 13000, 11875, },
4785 { 13125, 12000, },
4786 { 13250, 12125, },
4787 { 13375, 12250, },
4788 { 13500, 12375, },
4789 { 13625, 12500, },
4790 { 13750, 12625, },
4791 { 13875, 12750, },
4792 { 14000, 12875, },
4793 { 14125, 13000, },
4794 { 14250, 13125, },
4795 { 14375, 13250, },
4796 { 14500, 13375, },
4797 { 14625, 13500, },
4798 { 14750, 13625, },
4799 { 14875, 13750, },
4800 { 15000, 13875, },
4801 { 15125, 14000, },
4802 { 15250, 14125, },
4803 { 15375, 14250, },
4804 { 15500, 14375, },
4805 { 15625, 14500, },
4806 { 15750, 14625, },
4807 { 15875, 14750, },
4808 { 16000, 14875, },
4809 { 16125, 15000, },
4810 };
4811 if (INTEL_INFO(dev)->is_mobile)
4812 return v_table[pxvid].vm;
4813 else
4814 return v_table[pxvid].vd;
4815 }
4816
4817 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4818 {
4819 u64 now, diff, diffms;
4820 u32 count;
4821
4822 assert_spin_locked(&mchdev_lock);
4823
4824 now = ktime_get_raw_ns();
4825 diffms = now - dev_priv->ips.last_time2;
4826 do_div(diffms, NSEC_PER_MSEC);
4827
4828 /* Don't divide by 0 */
4829 if (!diffms)
4830 return;
4831
4832 count = I915_READ(GFXEC);
4833
4834 if (count < dev_priv->ips.last_count2) {
4835 diff = ~0UL - dev_priv->ips.last_count2;
4836 diff += count;
4837 } else {
4838 diff = count - dev_priv->ips.last_count2;
4839 }
4840
4841 dev_priv->ips.last_count2 = count;
4842 dev_priv->ips.last_time2 = now;
4843
4844 /* More magic constants... */
4845 diff = diff * 1181;
4846 diff = div_u64(diff, diffms * 10);
4847 dev_priv->ips.gfx_power = diff;
4848 }
4849
4850 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4851 {
4852 struct drm_device *dev = dev_priv->dev;
4853
4854 if (INTEL_INFO(dev)->gen != 5)
4855 return;
4856
4857 spin_lock_irq(&mchdev_lock);
4858
4859 __i915_update_gfx_val(dev_priv);
4860
4861 spin_unlock_irq(&mchdev_lock);
4862 }
4863
4864 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4865 {
4866 unsigned long t, corr, state1, corr2, state2;
4867 u32 pxvid, ext_v;
4868
4869 assert_spin_locked(&mchdev_lock);
4870
4871 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
4872 pxvid = (pxvid >> 24) & 0x7f;
4873 ext_v = pvid_to_extvid(dev_priv, pxvid);
4874
4875 state1 = ext_v;
4876
4877 t = i915_mch_val(dev_priv);
4878
4879 /* Revel in the empirically derived constants */
4880
4881 /* Correction factor in 1/100000 units */
4882 if (t > 80)
4883 corr = ((t * 2349) + 135940);
4884 else if (t >= 50)
4885 corr = ((t * 964) + 29317);
4886 else /* < 50 */
4887 corr = ((t * 301) + 1004);
4888
4889 corr = corr * ((150142 * state1) / 10000 - 78642);
4890 corr /= 100000;
4891 corr2 = (corr * dev_priv->ips.corr);
4892
4893 state2 = (corr2 * state1) / 10000;
4894 state2 /= 100; /* convert to mW */
4895
4896 __i915_update_gfx_val(dev_priv);
4897
4898 return dev_priv->ips.gfx_power + state2;
4899 }
4900
4901 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4902 {
4903 struct drm_device *dev = dev_priv->dev;
4904 unsigned long val;
4905
4906 if (INTEL_INFO(dev)->gen != 5)
4907 return 0;
4908
4909 spin_lock_irq(&mchdev_lock);
4910
4911 val = __i915_gfx_val(dev_priv);
4912
4913 spin_unlock_irq(&mchdev_lock);
4914
4915 return val;
4916 }
4917
4918 /**
4919 * i915_read_mch_val - return value for IPS use
4920 *
4921 * Calculate and return a value for the IPS driver to use when deciding whether
4922 * we have thermal and power headroom to increase CPU or GPU power budget.
4923 */
4924 unsigned long i915_read_mch_val(void)
4925 {
4926 struct drm_i915_private *dev_priv;
4927 unsigned long chipset_val, graphics_val, ret = 0;
4928
4929 spin_lock_irq(&mchdev_lock);
4930 if (!i915_mch_dev)
4931 goto out_unlock;
4932 dev_priv = i915_mch_dev;
4933
4934 chipset_val = __i915_chipset_val(dev_priv);
4935 graphics_val = __i915_gfx_val(dev_priv);
4936
4937 ret = chipset_val + graphics_val;
4938
4939 out_unlock:
4940 spin_unlock_irq(&mchdev_lock);
4941
4942 return ret;
4943 }
4944 EXPORT_SYMBOL_GPL(i915_read_mch_val);
4945
4946 /**
4947 * i915_gpu_raise - raise GPU frequency limit
4948 *
4949 * Raise the limit; IPS indicates we have thermal headroom.
4950 */
4951 bool i915_gpu_raise(void)
4952 {
4953 struct drm_i915_private *dev_priv;
4954 bool ret = true;
4955
4956 spin_lock_irq(&mchdev_lock);
4957 if (!i915_mch_dev) {
4958 ret = false;
4959 goto out_unlock;
4960 }
4961 dev_priv = i915_mch_dev;
4962
4963 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4964 dev_priv->ips.max_delay--;
4965
4966 out_unlock:
4967 spin_unlock_irq(&mchdev_lock);
4968
4969 return ret;
4970 }
4971 EXPORT_SYMBOL_GPL(i915_gpu_raise);
4972
4973 /**
4974 * i915_gpu_lower - lower GPU frequency limit
4975 *
4976 * IPS indicates we're close to a thermal limit, so throttle back the GPU
4977 * frequency maximum.
4978 */
4979 bool i915_gpu_lower(void)
4980 {
4981 struct drm_i915_private *dev_priv;
4982 bool ret = true;
4983
4984 spin_lock_irq(&mchdev_lock);
4985 if (!i915_mch_dev) {
4986 ret = false;
4987 goto out_unlock;
4988 }
4989 dev_priv = i915_mch_dev;
4990
4991 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4992 dev_priv->ips.max_delay++;
4993
4994 out_unlock:
4995 spin_unlock_irq(&mchdev_lock);
4996
4997 return ret;
4998 }
4999 EXPORT_SYMBOL_GPL(i915_gpu_lower);
5000
5001 /**
5002 * i915_gpu_busy - indicate GPU business to IPS
5003 *
5004 * Tell the IPS driver whether or not the GPU is busy.
5005 */
5006 bool i915_gpu_busy(void)
5007 {
5008 struct drm_i915_private *dev_priv;
5009 struct intel_engine_cs *ring;
5010 bool ret = false;
5011 int i;
5012
5013 spin_lock_irq(&mchdev_lock);
5014 if (!i915_mch_dev)
5015 goto out_unlock;
5016 dev_priv = i915_mch_dev;
5017
5018 for_each_ring(ring, dev_priv, i)
5019 ret |= !list_empty(&ring->request_list);
5020
5021 out_unlock:
5022 spin_unlock_irq(&mchdev_lock);
5023
5024 return ret;
5025 }
5026 EXPORT_SYMBOL_GPL(i915_gpu_busy);
5027
5028 /**
5029 * i915_gpu_turbo_disable - disable graphics turbo
5030 *
5031 * Disable graphics turbo by resetting the max frequency and setting the
5032 * current frequency to the default.
5033 */
5034 bool i915_gpu_turbo_disable(void)
5035 {
5036 struct drm_i915_private *dev_priv;
5037 bool ret = true;
5038
5039 spin_lock_irq(&mchdev_lock);
5040 if (!i915_mch_dev) {
5041 ret = false;
5042 goto out_unlock;
5043 }
5044 dev_priv = i915_mch_dev;
5045
5046 dev_priv->ips.max_delay = dev_priv->ips.fstart;
5047
5048 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
5049 ret = false;
5050
5051 out_unlock:
5052 spin_unlock_irq(&mchdev_lock);
5053
5054 return ret;
5055 }
5056 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
5057
5058 /**
5059 * Tells the intel_ips driver that the i915 driver is now loaded, if
5060 * IPS got loaded first.
5061 *
5062 * This awkward dance is so that neither module has to depend on the
5063 * other in order for IPS to do the appropriate communication of
5064 * GPU turbo limits to i915.
5065 */
5066 static void
5067 ips_ping_for_i915_load(void)
5068 {
5069 void (*link)(void);
5070
5071 link = symbol_get(ips_link_to_i915_driver);
5072 if (link) {
5073 link();
5074 symbol_put(ips_link_to_i915_driver);
5075 }
5076 }
5077
5078 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
5079 {
5080 /* We only register the i915 ips part with intel-ips once everything is
5081 * set up, to avoid intel-ips sneaking in and reading bogus values. */
5082 spin_lock_irq(&mchdev_lock);
5083 i915_mch_dev = dev_priv;
5084 spin_unlock_irq(&mchdev_lock);
5085
5086 ips_ping_for_i915_load();
5087 }
5088
5089 void intel_gpu_ips_teardown(void)
5090 {
5091 spin_lock_irq(&mchdev_lock);
5092 i915_mch_dev = NULL;
5093 spin_unlock_irq(&mchdev_lock);
5094 }
5095
5096 static void intel_init_emon(struct drm_device *dev)
5097 {
5098 struct drm_i915_private *dev_priv = dev->dev_private;
5099 u32 lcfuse;
5100 u8 pxw[16];
5101 int i;
5102
5103 /* Disable PMON while we program the event weights */
5104 I915_WRITE(ECR, 0);
5105 POSTING_READ(ECR);
5106
5107 /* Program energy weights for various events */
5108 I915_WRITE(SDEW, 0x15040d00);
5109 I915_WRITE(CSIEW0, 0x007f0000);
5110 I915_WRITE(CSIEW1, 0x1e220004);
5111 I915_WRITE(CSIEW2, 0x04000004);
5112
5113 for (i = 0; i < 5; i++)
5114 I915_WRITE(PEW + (i * 4), 0);
5115 for (i = 0; i < 3; i++)
5116 I915_WRITE(DEW + (i * 4), 0);
5117
5118 /* Program P-state weights to account for frequency power adjustment */
5119 for (i = 0; i < 16; i++) {
5120 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
5121 unsigned long freq = intel_pxfreq(pxvidfreq);
5122 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
5123 PXVFREQ_PX_SHIFT;
5124 unsigned long val;
5125
5126 val = vid * vid;
5127 val *= (freq / 1000);
5128 val *= 255;
5129 val /= (127*127*900);
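/* Worked example with illustrative numbers: vid = 100 and
 * freq = 400000 kHz give 100*100 * 400 * 255 / (127*127*900) ~= 70,
 * comfortably inside the 0xff limit checked below. */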
5130 if (val > 0xff)
5131 DRM_ERROR("bad pxval: %ld\n", val);
5132 pxw[i] = val;
5133 }
5134 /* Render standby states get 0 weight */
5135 pxw[14] = 0;
5136 pxw[15] = 0;
5137
5138 for (i = 0; i < 4; i++) {
5139 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
5140 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
5141 I915_WRITE(PXW + (i * 4), val);
5142 }
5143
5144 /* Adjust magic regs to magic values (more experimental results) */
5145 I915_WRITE(OGW0, 0);
5146 I915_WRITE(OGW1, 0);
5147 I915_WRITE(EG0, 0x00007f00);
5148 I915_WRITE(EG1, 0x0000000e);
5149 I915_WRITE(EG2, 0x000e0000);
5150 I915_WRITE(EG3, 0x68000300);
5151 I915_WRITE(EG4, 0x42000000);
5152 I915_WRITE(EG5, 0x00140031);
5153 I915_WRITE(EG6, 0);
5154 I915_WRITE(EG7, 0);
5155
5156 for (i = 0; i < 8; i++)
5157 I915_WRITE(PXWL + (i * 4), 0);
5158
5159 /* Enable PMON + select events */
5160 I915_WRITE(ECR, 0x80000019);
5161
5162 lcfuse = I915_READ(LCFUSE02);
5163
5164 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
5165 }
5166
5167 void intel_init_gt_powersave(struct drm_device *dev)
5168 {
5169 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
5170
5171 if (IS_CHERRYVIEW(dev))
5172 cherryview_init_gt_powersave(dev);
5173 else if (IS_VALLEYVIEW(dev))
5174 valleyview_init_gt_powersave(dev);
5175 }
5176
5177 void intel_cleanup_gt_powersave(struct drm_device *dev)
5178 {
5179 if (IS_CHERRYVIEW(dev))
5180 return;
5181 else if (IS_VALLEYVIEW(dev))
5182 valleyview_cleanup_gt_powersave(dev);
5183 }
5184
5185 /**
5186 * intel_suspend_gt_powersave - suspend PM work and helper threads
5187 * @dev: drm device
5188 *
5189 * We don't want to disable RC6 or other features here, we just want
5190 * to make sure any work we've queued has finished and won't bother
5191 * us while we're suspended.
5192 */
5193 void intel_suspend_gt_powersave(struct drm_device *dev)
5194 {
5195 struct drm_i915_private *dev_priv = dev->dev_private;
5196
5197 /* Interrupts should be disabled already to avoid re-arming. */
5198 WARN_ON(intel_irqs_enabled(dev_priv));
5199
5200 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
5201
5202 cancel_work_sync(&dev_priv->rps.work);
5203
5204 /* Force GPU to min freq during suspend */
5205 gen6_rps_idle(dev_priv);
5206 }
5207
5208 void intel_disable_gt_powersave(struct drm_device *dev)
5209 {
5210 struct drm_i915_private *dev_priv = dev->dev_private;
5211
5212 /* Interrupts should be disabled already to avoid re-arming. */
5213 WARN_ON(intel_irqs_enabled(dev_priv));
5214
5215 if (IS_IRONLAKE_M(dev)) {
5216 ironlake_disable_drps(dev);
5217 ironlake_disable_rc6(dev);
5218 } else if (INTEL_INFO(dev)->gen >= 6) {
5219 intel_suspend_gt_powersave(dev);
5220
5221 mutex_lock(&dev_priv->rps.hw_lock);
5222 if (IS_CHERRYVIEW(dev))
5223 cherryview_disable_rps(dev);
5224 else if (IS_VALLEYVIEW(dev))
5225 valleyview_disable_rps(dev);
5226 else
5227 gen6_disable_rps(dev);
5228 dev_priv->rps.enabled = false;
5229 mutex_unlock(&dev_priv->rps.hw_lock);
5230 }
5231 }
5232
5233 static void intel_gen6_powersave_work(struct work_struct *work)
5234 {
5235 struct drm_i915_private *dev_priv =
5236 container_of(work, struct drm_i915_private,
5237 rps.delayed_resume_work.work);
5238 struct drm_device *dev = dev_priv->dev;
5239
5240 mutex_lock(&dev_priv->rps.hw_lock);
5241
5242 if (IS_CHERRYVIEW(dev)) {
5243 cherryview_enable_rps(dev);
5244 } else if (IS_VALLEYVIEW(dev)) {
5245 valleyview_enable_rps(dev);
5246 } else if (IS_BROADWELL(dev)) {
5247 gen8_enable_rps(dev);
5248 __gen6_update_ring_freq(dev);
5249 } else {
5250 gen6_enable_rps(dev);
5251 __gen6_update_ring_freq(dev);
5252 }
5253 dev_priv->rps.enabled = true;
5254 mutex_unlock(&dev_priv->rps.hw_lock);
5255
5256 intel_runtime_pm_put(dev_priv);
5257 }
5258
5259 void intel_enable_gt_powersave(struct drm_device *dev)
5260 {
5261 struct drm_i915_private *dev_priv = dev->dev_private;
5262
5263 if (IS_IRONLAKE_M(dev)) {
5264 mutex_lock(&dev->struct_mutex);
5265 ironlake_enable_drps(dev);
5266 ironlake_enable_rc6(dev);
5267 intel_init_emon(dev);
5268 mutex_unlock(&dev->struct_mutex);
5269 } else if (INTEL_INFO(dev)->gen >= 6) {
5270 /*
5271 * PCU communication is slow and this doesn't need to be
5272 * done at any specific time, so do this out of our fast path
5273 * to make resume and init faster.
5274 *
5275 * We depend on the HW RC6 power context save/restore
5276 * mechanism when entering D3 through runtime PM suspend. So
5277 * disable RPM until RPS/RC6 is properly setup. We can only
5278 * get here via the driver load/system resume/runtime resume
5279 * paths, so the _noresume version is enough (and in case of
5280 * runtime resume it's necessary).
5281 */
5282 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
5283 round_jiffies_up_relative(HZ)))
5284 intel_runtime_pm_get_noresume(dev_priv);
5285 }
5286 }
5287
5288 void intel_reset_gt_powersave(struct drm_device *dev)
5289 {
5290 struct drm_i915_private *dev_priv = dev->dev_private;
5291
5292 dev_priv->rps.enabled = false;
5293 intel_enable_gt_powersave(dev);
5294 }
5295
5296 static void ibx_init_clock_gating(struct drm_device *dev)
5297 {
5298 struct drm_i915_private *dev_priv = dev->dev_private;
5299
5300 /*
5301 * On Ibex Peak and Cougar Point, we need to disable clock
5302 * gating for the panel power sequencer or it will fail to
5303 * start up when no ports are active.
5304 */
5305 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
5306 }
5307
5308 static void g4x_disable_trickle_feed(struct drm_device *dev)
5309 {
5310 struct drm_i915_private *dev_priv = dev->dev_private;
5311 int pipe;
5312
5313 for_each_pipe(dev_priv, pipe) {
5314 I915_WRITE(DSPCNTR(pipe),
5315 I915_READ(DSPCNTR(pipe)) |
5316 DISPPLANE_TRICKLE_FEED_DISABLE);
5317 intel_flush_primary_plane(dev_priv, pipe);
5318 }
5319 }
5320
5321 static void ilk_init_lp_watermarks(struct drm_device *dev)
5322 {
5323 struct drm_i915_private *dev_priv = dev->dev_private;
5324
5325 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
5326 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
5327 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
5328
5329 /*
5330 * Don't touch WM1S_LP_EN here.
5331 * Doing so could cause underruns.
5332 */
5333 }
5334
5335 static void ironlake_init_clock_gating(struct drm_device *dev)
5336 {
5337 struct drm_i915_private *dev_priv = dev->dev_private;
5338 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5339
5340 /*
5341 * Required for FBC
5342 * WaFbcDisableDpfcClockGating:ilk
5343 */
5344 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
5345 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
5346 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
5347
5348 I915_WRITE(PCH_3DCGDIS0,
5349 MARIUNIT_CLOCK_GATE_DISABLE |
5350 SVSMUNIT_CLOCK_GATE_DISABLE);
5351 I915_WRITE(PCH_3DCGDIS1,
5352 VFMUNIT_CLOCK_GATE_DISABLE);
5353
5354 /*
5355 * According to the spec the following bits should be set in
5356 * order to enable memory self-refresh
5357 * The bit 22/21 of 0x42004
5358 * The bit 5 of 0x42020
5359 * The bit 15 of 0x45000
5360 */
5361 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5362 (I915_READ(ILK_DISPLAY_CHICKEN2) |
5363 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
5364 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
5365 I915_WRITE(DISP_ARB_CTL,
5366 (I915_READ(DISP_ARB_CTL) |
5367 DISP_FBC_WM_DIS));
5368
5369 ilk_init_lp_watermarks(dev);
5370
5371 /*
5372 * Based on the document from hardware guys the following bits
5373 * should be set unconditionally in order to enable FBC.
5374 * The bit 22 of 0x42000
5375 * The bit 22 of 0x42004
5376 * The bit 7,8,9 of 0x42020.
5377 */
5378 if (IS_IRONLAKE_M(dev)) {
5379 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
5380 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5381 I915_READ(ILK_DISPLAY_CHICKEN1) |
5382 ILK_FBCQ_DIS);
5383 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5384 I915_READ(ILK_DISPLAY_CHICKEN2) |
5385 ILK_DPARB_GATE);
5386 }
5387
5388 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5389
5390 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5391 I915_READ(ILK_DISPLAY_CHICKEN2) |
5392 ILK_ELPIN_409_SELECT);
5393 I915_WRITE(_3D_CHICKEN2,
5394 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
5395 _3D_CHICKEN2_WM_READ_PIPELINED);
5396
5397 /* WaDisableRenderCachePipelinedFlush:ilk */
5398 I915_WRITE(CACHE_MODE_0,
5399 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5400
5401 /* WaDisable_RenderCache_OperationalFlush:ilk */
5402 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5403
5404 g4x_disable_trickle_feed(dev);
5405
5406 ibx_init_clock_gating(dev);
5407 }
5408
5409 static void cpt_init_clock_gating(struct drm_device *dev)
5410 {
5411 struct drm_i915_private *dev_priv = dev->dev_private;
5412 int pipe;
5413 uint32_t val;
5414
5415 /*
5416 * On Ibex Peak and Cougar Point, we need to disable clock
5417 * gating for the panel power sequencer or it will fail to
5418 * start up when no ports are active.
5419 */
5420 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
5421 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
5422 PCH_CPUNIT_CLOCK_GATE_DISABLE);
5423 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
5424 DPLS_EDP_PPS_FIX_DIS);
5425 	/* The following fixes a weird display corruption (a few pixels
5426 	 * shifted downward) seen only on the LVDS of some HP laptops with IVY.
5427 	 */
5428 for_each_pipe(dev_priv, pipe) {
5429 val = I915_READ(TRANS_CHICKEN2(pipe));
5430 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
5431 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5432 if (dev_priv->vbt.fdi_rx_polarity_inverted)
5433 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5434 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
5435 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
5436 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
5437 I915_WRITE(TRANS_CHICKEN2(pipe), val);
5438 }
5439 /* WADP0ClockGatingDisable */
5440 for_each_pipe(dev_priv, pipe) {
5441 I915_WRITE(TRANS_CHICKEN1(pipe),
5442 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5443 }
5444 }
5445
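/*
 * Sanity check that the BIOS programmed the expected WM0 latency into
 * MCH_SSKPD; a wrong value can cause underruns (see the message below).
 */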
5446 static void gen6_check_mch_setup(struct drm_device *dev)
5447 {
5448 struct drm_i915_private *dev_priv = dev->dev_private;
5449 uint32_t tmp;
5450
5451 tmp = I915_READ(MCH_SSKPD);
5452 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
5453 		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
5454 tmp);
5455 }
5456
5457 static void gen6_init_clock_gating(struct drm_device *dev)
5458 {
5459 struct drm_i915_private *dev_priv = dev->dev_private;
5460 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5461
5462 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5463
5464 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5465 I915_READ(ILK_DISPLAY_CHICKEN2) |
5466 ILK_ELPIN_409_SELECT);
5467
5468 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
5469 I915_WRITE(_3D_CHICKEN,
5470 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
5471
5472 /* WaDisable_RenderCache_OperationalFlush:snb */
5473 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5474
5475 /*
5476 	 * BSpec recommends 8x4 when MSAA is used,
5477 * however in practice 16x4 seems fastest.
5478 *
5479 * Note that PS/WM thread counts depend on the WIZ hashing
5480 * disable bit, which we don't touch here, but it's good
5481 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5482 */
5483 I915_WRITE(GEN6_GT_MODE,
5484 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5485
5486 ilk_init_lp_watermarks(dev);
5487
5488 I915_WRITE(CACHE_MODE_0,
5489 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
5490
5491 I915_WRITE(GEN6_UCGCTL1,
5492 I915_READ(GEN6_UCGCTL1) |
5493 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
5494 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
5495
5496 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5497 * gating disable must be set. Failure to set it results in
5498 * flickering pixels due to Z write ordering failures after
5499 * some amount of runtime in the Mesa "fire" demo, and Unigine
5500 * Sanctuary and Tropics, and apparently anything else with
5501 * alpha test or pixel discard.
5502 *
5503 * According to the spec, bit 11 (RCCUNIT) must also be set,
5504 * but we didn't debug actual testcases to find it out.
5505 *
5506 * WaDisableRCCUnitClockGating:snb
5507 * WaDisableRCPBUnitClockGating:snb
5508 */
5509 I915_WRITE(GEN6_UCGCTL2,
5510 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
5511 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5512
5513 /* WaStripsFansDisableFastClipPerformanceFix:snb */
5514 I915_WRITE(_3D_CHICKEN3,
5515 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
5516
5517 /*
5518 	 * BSpec says:
5519 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
5520 * 3DSTATE_SF number of SF output attributes is more than 16."
5521 */
5522 I915_WRITE(_3D_CHICKEN3,
5523 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
5524
5525 /*
5526 * According to the spec the following bits should be
5527 	 * set in order to enable memory self-refresh and FBC:
5528 	 * bits 21 and 22 of 0x42000
5529 	 * bits 21 and 22 of 0x42004
5530 	 * bits 5 and 7 of 0x42020
5531 	 * bit 14 of 0x70180
5532 	 * bit 14 of 0x71180
5533 *
5534 * WaFbcAsynchFlipDisableFbcQueue:snb
5535 */
5536 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5537 I915_READ(ILK_DISPLAY_CHICKEN1) |
5538 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
5539 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5540 I915_READ(ILK_DISPLAY_CHICKEN2) |
5541 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
5542 I915_WRITE(ILK_DSPCLK_GATE_D,
5543 I915_READ(ILK_DSPCLK_GATE_D) |
5544 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
5545 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
5546
5547 g4x_disable_trickle_feed(dev);
5548
5549 cpt_init_clock_gating(dev);
5550
5551 gen6_check_mch_setup(dev);
5552 }
5553
5554 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
5555 {
5556 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
5557
5558 /*
5559 * WaVSThreadDispatchOverride:ivb,vlv
5560 *
5561 * This actually overrides the dispatch
5562 * mode for all thread types.
5563 */
5564 reg &= ~GEN7_FF_SCHED_MASK;
5565 reg |= GEN7_FF_TS_SCHED_HW;
5566 reg |= GEN7_FF_VS_SCHED_HW;
5567 reg |= GEN7_FF_DS_SCHED_HW;
5568
5569 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
5570 }
5571
5572 static void lpt_init_clock_gating(struct drm_device *dev)
5573 {
5574 struct drm_i915_private *dev_priv = dev->dev_private;
5575
5576 /*
5577 * TODO: this bit should only be enabled when really needed, then
5578 * disabled when not needed anymore in order to save power.
5579 */
5580 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
5581 I915_WRITE(SOUTH_DSPCLK_GATE_D,
5582 I915_READ(SOUTH_DSPCLK_GATE_D) |
5583 PCH_LP_PARTITION_LEVEL_DISABLE);
5584
5585 	/* WADP0ClockGatingDisable:hsw */
5586 I915_WRITE(_TRANSA_CHICKEN1,
5587 I915_READ(_TRANSA_CHICKEN1) |
5588 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5589 }
5590
5591 static void lpt_suspend_hw(struct drm_device *dev)
5592 {
5593 struct drm_i915_private *dev_priv = dev->dev_private;
5594
5595 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
5596 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
5597
5598 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5599 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
5600 }
5601 }
5602
5603 static void broadwell_init_clock_gating(struct drm_device *dev)
5604 {
5605 struct drm_i915_private *dev_priv = dev->dev_private;
5606 enum pipe pipe;
5607
5608 I915_WRITE(WM3_LP_ILK, 0);
5609 I915_WRITE(WM2_LP_ILK, 0);
5610 I915_WRITE(WM1_LP_ILK, 0);
5611
5612 /* FIXME(BDW): Check all the w/a, some might only apply to
5613 * pre-production hw. */
5614 
5616 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5617
5618 I915_WRITE(_3D_CHICKEN3,
5619 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
5620 
5622 /* WaSwitchSolVfFArbitrationPriority:bdw */
5623 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5624
5625 /* WaPsrDPAMaskVBlankInSRD:bdw */
5626 I915_WRITE(CHICKEN_PAR1_1,
5627 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
5628
5629 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
5630 for_each_pipe(dev_priv, pipe) {
5631 I915_WRITE(CHICKEN_PIPESL_1(pipe),
5632 I915_READ(CHICKEN_PIPESL_1(pipe)) |
5633 BDW_DPRS_MASK_VBLANK_SRD);
5634 }
5635
5636 /* WaVSRefCountFullforceMissDisable:bdw */
5637 /* WaDSRefCountFullforceMissDisable:bdw */
5638 I915_WRITE(GEN7_FF_THREAD_MODE,
5639 I915_READ(GEN7_FF_THREAD_MODE) &
5640 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5641
5642 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5643 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
5644
5645 /* WaDisableSDEUnitClockGating:bdw */
5646 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5647 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5648
5649 lpt_init_clock_gating(dev);
5650 }
5651
5652 static void haswell_init_clock_gating(struct drm_device *dev)
5653 {
5654 struct drm_i915_private *dev_priv = dev->dev_private;
5655
5656 ilk_init_lp_watermarks(dev);
5657
5658 /* L3 caching of data atomics doesn't work -- disable it. */
5659 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
5660 I915_WRITE(HSW_ROW_CHICKEN3,
5661 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
5662
5663 /* This is required by WaCatErrorRejectionIssue:hsw */
5664 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5665 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5666 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5667
5668 /* WaVSRefCountFullforceMissDisable:hsw */
5669 I915_WRITE(GEN7_FF_THREAD_MODE,
5670 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
5671
5672 /* WaDisable_RenderCache_OperationalFlush:hsw */
5673 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5674
5675 /* enable HiZ Raw Stall Optimization */
5676 I915_WRITE(CACHE_MODE_0_GEN7,
5677 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5678
5679 /* WaDisable4x2SubspanOptimization:hsw */
5680 I915_WRITE(CACHE_MODE_1,
5681 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5682
5683 /*
5684 * BSpec recommends 8x4 when MSAA is used,
5685 * however in practice 16x4 seems fastest.
5686 *
5687 * Note that PS/WM thread counts depend on the WIZ hashing
5688 * disable bit, which we don't touch here, but it's good
5689 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5690 */
5691 I915_WRITE(GEN7_GT_MODE,
5692 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5693
5694 /* WaSwitchSolVfFArbitrationPriority:hsw */
5695 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5696
5697 /* WaRsPkgCStateDisplayPMReq:hsw */
5698 I915_WRITE(CHICKEN_PAR1_1,
5699 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
5700
5701 lpt_init_clock_gating(dev);
5702 }
5703
5704 static void ivybridge_init_clock_gating(struct drm_device *dev)
5705 {
5706 struct drm_i915_private *dev_priv = dev->dev_private;
5707 uint32_t snpcr;
5708
5709 ilk_init_lp_watermarks(dev);
5710
5711 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
5712
5713 /* WaDisableEarlyCull:ivb */
5714 I915_WRITE(_3D_CHICKEN3,
5715 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5716
5717 /* WaDisableBackToBackFlipFix:ivb */
5718 I915_WRITE(IVB_CHICKEN3,
5719 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5720 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5721
5722 /* WaDisablePSDDualDispatchEnable:ivb */
5723 if (IS_IVB_GT1(dev))
5724 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5725 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5726
5727 /* WaDisable_RenderCache_OperationalFlush:ivb */
5728 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5729
5730 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
5731 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5732 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5733
5734 /* WaApplyL3ControlAndL3ChickenMode:ivb */
5735 I915_WRITE(GEN7_L3CNTLREG1,
5736 GEN7_WA_FOR_GEN7_L3_CONTROL);
5737 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
5738 GEN7_WA_L3_CHICKEN_MODE);
5739 if (IS_IVB_GT1(dev))
5740 I915_WRITE(GEN7_ROW_CHICKEN2,
5741 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5742 else {
5743 /* must write both registers */
5744 I915_WRITE(GEN7_ROW_CHICKEN2,
5745 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5746 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
5747 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5748 }
5749
5750 /* WaForceL3Serialization:ivb */
5751 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5752 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5753
5754 /*
5755 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5756 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
5757 */
5758 I915_WRITE(GEN6_UCGCTL2,
5759 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5760
5761 /* This is required by WaCatErrorRejectionIssue:ivb */
5762 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5763 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5764 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5765
5766 g4x_disable_trickle_feed(dev);
5767
5768 gen7_setup_fixed_func_scheduler(dev_priv);
5769
5770 if (0) { /* causes HiZ corruption on ivb:gt1 */
5771 /* enable HiZ Raw Stall Optimization */
5772 I915_WRITE(CACHE_MODE_0_GEN7,
5773 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5774 }
5775
5776 /* WaDisable4x2SubspanOptimization:ivb */
5777 I915_WRITE(CACHE_MODE_1,
5778 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5779
5780 /*
5781 * BSpec recommends 8x4 when MSAA is used,
5782 * however in practice 16x4 seems fastest.
5783 *
5784 * Note that PS/WM thread counts depend on the WIZ hashing
5785 * disable bit, which we don't touch here, but it's good
5786 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5787 */
5788 I915_WRITE(GEN7_GT_MODE,
5789 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5790
5791 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5792 snpcr &= ~GEN6_MBC_SNPCR_MASK;
5793 snpcr |= GEN6_MBC_SNPCR_MED;
5794 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5795
5796 if (!HAS_PCH_NOP(dev))
5797 cpt_init_clock_gating(dev);
5798
5799 gen6_check_mch_setup(dev);
5800 }
5801
5802 static void valleyview_init_clock_gating(struct drm_device *dev)
5803 {
5804 struct drm_i915_private *dev_priv = dev->dev_private;
5805
5806 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5807
5808 /* WaDisableEarlyCull:vlv */
5809 I915_WRITE(_3D_CHICKEN3,
5810 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5811
5812 /* WaDisableBackToBackFlipFix:vlv */
5813 I915_WRITE(IVB_CHICKEN3,
5814 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5815 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5816
5817 /* WaPsdDispatchEnable:vlv */
5818 /* WaDisablePSDDualDispatchEnable:vlv */
5819 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5820 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
5821 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5822
5823 /* WaDisable_RenderCache_OperationalFlush:vlv */
5824 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5825
5826 /* WaForceL3Serialization:vlv */
5827 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5828 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5829
5830 /* WaDisableDopClockGating:vlv */
5831 I915_WRITE(GEN7_ROW_CHICKEN2,
5832 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5833
5834 /* This is required by WaCatErrorRejectionIssue:vlv */
5835 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5836 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5837 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5838
5839 gen7_setup_fixed_func_scheduler(dev_priv);
5840
5841 /*
5842 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5843 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
5844 */
5845 I915_WRITE(GEN6_UCGCTL2,
5846 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5847
5848 	/* WaDisableL3Bank2xClockGate:vlv
5849 	 * Disabling L3 clock gating - MMIO 940c[25] = 1
5850 	 * Set bit 25 to disable L3_BANK_2x_CLK_GATING */
5851 I915_WRITE(GEN7_UCGCTL4,
5852 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
5853
5854 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5855
5856 /*
5857 * BSpec says this must be set, even though
5858 * WaDisable4x2SubspanOptimization isn't listed for VLV.
5859 */
5860 I915_WRITE(CACHE_MODE_1,
5861 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5862
5863 /*
5864 * WaIncreaseL3CreditsForVLVB0:vlv
5865 * This is the hardware default actually.
5866 */
5867 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
5868
5869 /*
5870 * WaDisableVLVClockGating_VBIIssue:vlv
5871 	 * Disable clock gating on the GCFG unit to prevent a delay
5872 * in the reporting of vblank events.
5873 */
5874 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
5875 }
5876
5877 static void cherryview_init_clock_gating(struct drm_device *dev)
5878 {
5879 struct drm_i915_private *dev_priv = dev->dev_private;
5880
5881 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5882
5883 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5884
5885 /* WaVSRefCountFullforceMissDisable:chv */
5886 /* WaDSRefCountFullforceMissDisable:chv */
5887 I915_WRITE(GEN7_FF_THREAD_MODE,
5888 I915_READ(GEN7_FF_THREAD_MODE) &
5889 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5890
5891 /* WaDisableSemaphoreAndSyncFlipWait:chv */
5892 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5893 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
5894
5895 /* WaDisableCSUnitClockGating:chv */
5896 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
5897 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
5898
5899 /* WaDisableSDEUnitClockGating:chv */
5900 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5901 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5902
5903 /* WaDisableGunitClockGating:chv (pre-production hw) */
5904 I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
5905 GINT_DIS);
5906
5907 /* WaDisableFfDopClockGating:chv (pre-production hw) */
5908 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5909 _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
5910
5911 /* WaDisableDopClockGating:chv (pre-production hw) */
5912 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
5913 GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
5914 }
5915
5916 static void g4x_init_clock_gating(struct drm_device *dev)
5917 {
5918 struct drm_i915_private *dev_priv = dev->dev_private;
5919 uint32_t dspclk_gate;
5920
5921 I915_WRITE(RENCLK_GATE_D1, 0);
5922 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
5923 GS_UNIT_CLOCK_GATE_DISABLE |
5924 CL_UNIT_CLOCK_GATE_DISABLE);
5925 I915_WRITE(RAMCLK_GATE_D, 0);
5926 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
5927 OVRUNIT_CLOCK_GATE_DISABLE |
5928 OVCUNIT_CLOCK_GATE_DISABLE;
5929 if (IS_GM45(dev))
5930 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
5931 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
5932
5933 /* WaDisableRenderCachePipelinedFlush */
5934 I915_WRITE(CACHE_MODE_0,
5935 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5936
5937 /* WaDisable_RenderCache_OperationalFlush:g4x */
5938 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5939
5940 g4x_disable_trickle_feed(dev);
5941 }
5942
5943 static void crestline_init_clock_gating(struct drm_device *dev)
5944 {
5945 struct drm_i915_private *dev_priv = dev->dev_private;
5946
5947 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
5948 I915_WRITE(RENCLK_GATE_D2, 0);
5949 I915_WRITE(DSPCLK_GATE_D, 0);
5950 I915_WRITE(RAMCLK_GATE_D, 0);
5951 I915_WRITE16(DEUC, 0);
5952 I915_WRITE(MI_ARB_STATE,
5953 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5954
5955 /* WaDisable_RenderCache_OperationalFlush:gen4 */
5956 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5957 }
5958
5959 static void broadwater_init_clock_gating(struct drm_device *dev)
5960 {
5961 struct drm_i915_private *dev_priv = dev->dev_private;
5962
5963 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
5964 I965_RCC_CLOCK_GATE_DISABLE |
5965 I965_RCPB_CLOCK_GATE_DISABLE |
5966 I965_ISC_CLOCK_GATE_DISABLE |
5967 I965_FBC_CLOCK_GATE_DISABLE);
5968 I915_WRITE(RENCLK_GATE_D2, 0);
5969 I915_WRITE(MI_ARB_STATE,
5970 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5971
5972 /* WaDisable_RenderCache_OperationalFlush:gen4 */
5973 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5974 }
5975
5976 static void gen3_init_clock_gating(struct drm_device *dev)
5977 {
5978 struct drm_i915_private *dev_priv = dev->dev_private;
5979 u32 dstate = I915_READ(D_STATE);
5980
5981 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
5982 DSTATE_DOT_CLOCK_GATING;
5983 I915_WRITE(D_STATE, dstate);
5984
5985 if (IS_PINEVIEW(dev))
5986 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
5987
5988 /* IIR "flip pending" means done if this bit is set */
5989 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
5990
5991 /* interrupts should cause a wake up from C3 */
5992 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
5993
5994 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
5995 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
5996
5997 I915_WRITE(MI_ARB_STATE,
5998 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5999 }
6000
6001 static void i85x_init_clock_gating(struct drm_device *dev)
6002 {
6003 struct drm_i915_private *dev_priv = dev->dev_private;
6004
6005 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
6006
6007 /* interrupts should cause a wake up from C3 */
6008 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
6009 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
6010
6011 I915_WRITE(MEM_MODE,
6012 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
6013 }
6014
6015 static void i830_init_clock_gating(struct drm_device *dev)
6016 {
6017 struct drm_i915_private *dev_priv = dev->dev_private;
6018
6019 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
6020
6021 I915_WRITE(MEM_MODE,
6022 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
6023 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
6024 }
6025
6026 void intel_init_clock_gating(struct drm_device *dev)
6027 {
6028 struct drm_i915_private *dev_priv = dev->dev_private;
6029
6030 dev_priv->display.init_clock_gating(dev);
6031 }
6032
6033 void intel_suspend_hw(struct drm_device *dev)
6034 {
6035 if (HAS_PCH_LPT(dev))
6036 lpt_suspend_hw(dev);
6037 }
6038
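/*
 * Iterate over the power wells servicing any domain in @domain_mask,
 * in array order (the enable order) or in reverse (the disable order).
 * A minimal usage sketch, mirroring intel_display_power_get() below:
 *
 *	for_each_power_well(i, power_well, BIT(domain), power_domains)
 *		power_well->ops->enable(dev_priv, power_well);
 */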
6039 #define for_each_power_well(i, power_well, domain_mask, power_domains) \
6040 for (i = 0; \
6041 i < (power_domains)->power_well_count && \
6042 ((power_well) = &(power_domains)->power_wells[i]); \
6043 i++) \
6044 if ((power_well)->domains & (domain_mask))
6045
6046 #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
6047 for (i = (power_domains)->power_well_count - 1; \
6048 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
6049 i--) \
6050 if ((power_well)->domains & (domain_mask))
6051
6052 /**
6053 * We should only use the power well if we explicitly asked the hardware to
6054 * enable it, so check if it's enabled and also check if we've requested it to
6055 * be enabled.
6056 */
6057 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
6058 struct i915_power_well *power_well)
6059 {
6060 return I915_READ(HSW_PWR_WELL_DRIVER) ==
6061 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
6062 }
6063
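/*
 * Check whether every (non always-on) power well servicing @domain is
 * enabled, going by the cached ->hw_enabled state. The _unlocked
 * variant leaves locking to the caller; the wrapper below takes
 * power_domains->lock itself.
 */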
6064 bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
6065 enum intel_display_power_domain domain)
6066 {
6067 struct i915_power_domains *power_domains;
6068 struct i915_power_well *power_well;
6069 bool is_enabled;
6070 int i;
6071
6072 if (dev_priv->pm.suspended)
6073 return false;
6074
6075 power_domains = &dev_priv->power_domains;
6076
6077 is_enabled = true;
6078
6079 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
6080 if (power_well->always_on)
6081 continue;
6082
6083 if (!power_well->hw_enabled) {
6084 is_enabled = false;
6085 break;
6086 }
6087 }
6088
6089 return is_enabled;
6090 }
6091
6092 bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
6093 enum intel_display_power_domain domain)
6094 {
6095 struct i915_power_domains *power_domains;
6096 bool ret;
6097
6098 power_domains = &dev_priv->power_domains;
6099
6100 mutex_lock(&power_domains->lock);
6101 ret = intel_display_power_enabled_unlocked(dev_priv, domain);
6102 mutex_unlock(&power_domains->lock);
6103
6104 return ret;
6105 }
6106
6107 /*
6108 * Starting with Haswell, we have a "Power Down Well" that can be turned off
6109 * when not needed anymore. We have 4 registers that can request the power well
6110 * to be enabled, and it will only be disabled if none of the registers is
6111 * requesting it to be enabled.
6112 */
6113 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
6114 {
6115 struct drm_device *dev = dev_priv->dev;
6116
6117 /*
6118 * After we re-enable the power well, if we touch VGA register 0x3d5
6119 * we'll get unclaimed register interrupts. This stops after we write
6120 * anything to the VGA MSR register. The vgacon module uses this
6121 * register all the time, so if we unbind our driver and, as a
6122 * consequence, bind vgacon, we'll get stuck in an infinite loop at
6123 	 * console_unlock(). So here we touch the VGA MSR register, making
6124 * sure vgacon can keep working normally without triggering interrupts
6125 * and error messages.
6126 */
6127 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
6128 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
6129 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6130
6131 if (IS_BROADWELL(dev))
6132 gen8_irq_power_well_post_enable(dev_priv);
6133 }
6134
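/*
 * Flip the driver's request bit in HSW_PWR_WELL_DRIVER. Enabling waits
 * up to 20ms for the hardware to report the well as powered; disabling
 * only drops our request, since the well stays up while any of the
 * other request registers still asks for it.
 */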
6135 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
6136 struct i915_power_well *power_well, bool enable)
6137 {
6138 bool is_enabled, enable_requested;
6139 uint32_t tmp;
6140
6141 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
6142 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
6143 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
6144
6145 if (enable) {
6146 if (!enable_requested)
6147 I915_WRITE(HSW_PWR_WELL_DRIVER,
6148 HSW_PWR_WELL_ENABLE_REQUEST);
6149
6150 if (!is_enabled) {
6151 DRM_DEBUG_KMS("Enabling power well\n");
6152 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
6153 HSW_PWR_WELL_STATE_ENABLED), 20))
6154 DRM_ERROR("Timeout enabling power well\n");
6155 }
6156
6157 hsw_power_well_post_enable(dev_priv);
6158 } else {
6159 if (enable_requested) {
6160 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
6161 POSTING_READ(HSW_PWR_WELL_DRIVER);
6162 DRM_DEBUG_KMS("Requesting to disable the power well\n");
6163 }
6164 }
6165 }
6166
6167 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
6168 struct i915_power_well *power_well)
6169 {
6170 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
6171
6172 /*
6173 * We're taking over the BIOS, so clear any requests made by it since
6174 * the driver is in charge now.
6175 */
6176 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
6177 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
6178 }
6179
6180 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
6181 struct i915_power_well *power_well)
6182 {
6183 hsw_set_power_well(dev_priv, power_well, true);
6184 }
6185
6186 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
6187 struct i915_power_well *power_well)
6188 {
6189 hsw_set_power_well(dev_priv, power_well, false);
6190 }
6191
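/*
 * Pre-HSW platforms have no controllable display power well, so the
 * "always-on" well uses no-op ops and always reports itself enabled.
 */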
6192 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
6193 struct i915_power_well *power_well)
6194 {
6195 }
6196
6197 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
6198 struct i915_power_well *power_well)
6199 {
6200 return true;
6201 }
6202
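/*
 * Request a power well state change from the Punit over the sideband
 * interface and poll the status register (for up to 100ms) until the
 * new state sticks.
 */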
6203 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
6204 struct i915_power_well *power_well, bool enable)
6205 {
6206 enum punit_power_well power_well_id = power_well->data;
6207 u32 mask;
6208 u32 state;
6209 u32 ctrl;
6210
6211 mask = PUNIT_PWRGT_MASK(power_well_id);
6212 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
6213 PUNIT_PWRGT_PWR_GATE(power_well_id);
6214
6215 mutex_lock(&dev_priv->rps.hw_lock);
6216
6217 #define COND \
6218 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
6219
6220 if (COND)
6221 goto out;
6222
6223 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
6224 ctrl &= ~mask;
6225 ctrl |= state;
6226 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
6227
6228 if (wait_for(COND, 100))
6229 		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
6230 state,
6231 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
6232
6233 #undef COND
6234
6235 out:
6236 mutex_unlock(&dev_priv->rps.hw_lock);
6237 }
6238
6239 static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
6240 struct i915_power_well *power_well)
6241 {
6242 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
6243 }
6244
6245 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
6246 struct i915_power_well *power_well)
6247 {
6248 vlv_set_power_well(dev_priv, power_well, true);
6249 }
6250
6251 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
6252 struct i915_power_well *power_well)
6253 {
6254 vlv_set_power_well(dev_priv, power_well, false);
6255 }
6256
6257 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
6258 struct i915_power_well *power_well)
6259 {
6260 int power_well_id = power_well->data;
6261 bool enabled = false;
6262 u32 mask;
6263 u32 state;
6264 u32 ctrl;
6265
6266 mask = PUNIT_PWRGT_MASK(power_well_id);
6267 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
6268
6269 mutex_lock(&dev_priv->rps.hw_lock);
6270
6271 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
6272 /*
6273 * We only ever set the power-on and power-gate states, anything
6274 * else is unexpected.
6275 */
6276 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
6277 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
6278 if (state == ctrl)
6279 enabled = true;
6280
6281 /*
6282 * A transient state at this point would mean some unexpected party
6283 * is poking at the power controls too.
6284 */
6285 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
6286 WARN_ON(ctrl != state);
6287
6288 mutex_unlock(&dev_priv->rps.hw_lock);
6289
6290 return enabled;
6291 }
6292
6293 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
6294 struct i915_power_well *power_well)
6295 {
6296 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6297
6298 vlv_set_power_well(dev_priv, power_well, true);
6299
6300 spin_lock_irq(&dev_priv->irq_lock);
6301 valleyview_enable_display_irqs(dev_priv);
6302 spin_unlock_irq(&dev_priv->irq_lock);
6303
6304 /*
6305 	 * During driver initialization/resume we can avoid restoring the
6306 	 * part of the HW/SW state that will be initialized explicitly anyway.
6307 */
6308 if (dev_priv->power_domains.initializing)
6309 return;
6310
6311 intel_hpd_init(dev_priv->dev);
6312
6313 i915_redisable_vga_power_on(dev_priv->dev);
6314 }
6315
6316 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
6317 struct i915_power_well *power_well)
6318 {
6319 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6320
6321 spin_lock_irq(&dev_priv->irq_lock);
6322 valleyview_disable_display_irqs(dev_priv);
6323 spin_unlock_irq(&dev_priv->irq_lock);
6324
6325 vlv_set_power_well(dev_priv, power_well, false);
6326
6327 vlv_power_sequencer_reset(dev_priv);
6328 }
6329
6330 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6331 struct i915_power_well *power_well)
6332 {
6333 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6334
6335 /*
6336 * Enable the CRI clock source so we can get at the
6337 * display and the reference clock for VGA
6338 * hotplug / manual detection.
6339 */
6340 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6341 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6342 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6343
6344 vlv_set_power_well(dev_priv, power_well, true);
6345
6346 /*
6347 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
6348 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
6349 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
6350 * b. The other bits such as sfr settings / modesel may all
6351 * be set to 0.
6352 *
6353 * This should only be done on init and resume from S3 with
6354 * both PLLs disabled, or we risk losing DPIO and PLL
6355 * synchronization.
6356 */
6357 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
6358 }
6359
6360 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6361 struct i915_power_well *power_well)
6362 {
6363 enum pipe pipe;
6364
6365 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6366
6367 for_each_pipe(dev_priv, pipe)
6368 assert_pll_disabled(dev_priv, pipe);
6369
6370 /* Assert common reset */
6371 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
6372
6373 vlv_set_power_well(dev_priv, power_well, false);
6374 }
6375
6376 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6377 struct i915_power_well *power_well)
6378 {
6379 enum dpio_phy phy;
6380
6381 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6382 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6383
6384 /*
6385 * Enable the CRI clock source so we can get at the
6386 * display and the reference clock for VGA
6387 * hotplug / manual detection.
6388 */
6389 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6390 phy = DPIO_PHY0;
6391 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6392 DPLL_REFA_CLK_ENABLE_VLV);
6393 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6394 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6395 } else {
6396 phy = DPIO_PHY1;
6397 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
6398 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6399 }
6400 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6401 vlv_set_power_well(dev_priv, power_well, true);
6402
6403 /* Poll for phypwrgood signal */
6404 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
6405 		DRM_ERROR("Display PHY %d is not powered up\n", phy);
6406
6407 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
6408 PHY_COM_LANE_RESET_DEASSERT(phy));
6409 }
6410
6411 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6412 struct i915_power_well *power_well)
6413 {
6414 enum dpio_phy phy;
6415
6416 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6417 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6418
6419 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6420 phy = DPIO_PHY0;
6421 assert_pll_disabled(dev_priv, PIPE_A);
6422 assert_pll_disabled(dev_priv, PIPE_B);
6423 } else {
6424 phy = DPIO_PHY1;
6425 assert_pll_disabled(dev_priv, PIPE_C);
6426 }
6427
6428 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
6429 ~PHY_COM_LANE_RESET_DEASSERT(phy));
6430
6431 vlv_set_power_well(dev_priv, power_well, false);
6432 }
6433
6434 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
6435 struct i915_power_well *power_well)
6436 {
6437 enum pipe pipe = power_well->data;
6438 bool enabled;
6439 u32 state, ctrl;
6440
6441 mutex_lock(&dev_priv->rps.hw_lock);
6442
6443 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
6444 /*
6445 * We only ever set the power-on and power-gate states, anything
6446 * else is unexpected.
6447 */
6448 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
6449 enabled = state == DP_SSS_PWR_ON(pipe);
6450
6451 /*
6452 * A transient state at this point would mean some unexpected party
6453 * is poking at the power controls too.
6454 */
6455 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
6456 WARN_ON(ctrl << 16 != state);
6457
6458 mutex_unlock(&dev_priv->rps.hw_lock);
6459
6460 return enabled;
6461 }
6462
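/*
 * CHV per-pipe power wells are driven through the Punit display
 * frequency register: the DP_SSC_* field carries the request and the
 * DP_SSS_* field reports the status, which is polled for up to 100ms.
 */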
6463 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
6464 struct i915_power_well *power_well,
6465 bool enable)
6466 {
6467 enum pipe pipe = power_well->data;
6468 u32 state;
6469 u32 ctrl;
6470
6471 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
6472
6473 mutex_lock(&dev_priv->rps.hw_lock);
6474
6475 #define COND \
6476 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
6477
6478 if (COND)
6479 goto out;
6480
6481 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
6482 ctrl &= ~DP_SSC_MASK(pipe);
6483 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
6484 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
6485
6486 if (wait_for(COND, 100))
6487 		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
6488 state,
6489 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
6490
6491 #undef COND
6492
6493 out:
6494 mutex_unlock(&dev_priv->rps.hw_lock);
6495 }
6496
6497 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
6498 struct i915_power_well *power_well)
6499 {
6500 chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
6501 }
6502
6503 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
6504 struct i915_power_well *power_well)
6505 {
6506 WARN_ON_ONCE(power_well->data != PIPE_A &&
6507 power_well->data != PIPE_B &&
6508 power_well->data != PIPE_C);
6509
6510 chv_set_pipe_power_well(dev_priv, power_well, true);
6511 }
6512
6513 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
6514 struct i915_power_well *power_well)
6515 {
6516 WARN_ON_ONCE(power_well->data != PIPE_A &&
6517 power_well->data != PIPE_B &&
6518 power_well->data != PIPE_C);
6519
6520 chv_set_pipe_power_well(dev_priv, power_well, false);
6521 }
6522
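/*
 * Cross-check a power well's hardware state against its software
 * reference count and warn on any mismatch.
 */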
6523 static void check_power_well_state(struct drm_i915_private *dev_priv,
6524 struct i915_power_well *power_well)
6525 {
6526 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
6527
6528 if (power_well->always_on || !i915.disable_power_well) {
6529 if (!enabled)
6530 goto mismatch;
6531
6532 return;
6533 }
6534
6535 if (enabled != (power_well->count > 0))
6536 goto mismatch;
6537
6538 return;
6539
6540 mismatch:
6541 	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
6542 power_well->name, power_well->always_on, enabled,
6543 power_well->count, i915.disable_power_well);
6544 }
6545
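/*
 * Take a runtime PM reference and power up every well servicing
 * @domain whose use count was previously zero, then bump the
 * per-domain use count.
 */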
6546 void intel_display_power_get(struct drm_i915_private *dev_priv,
6547 enum intel_display_power_domain domain)
6548 {
6549 struct i915_power_domains *power_domains;
6550 struct i915_power_well *power_well;
6551 int i;
6552
6553 intel_runtime_pm_get(dev_priv);
6554
6555 power_domains = &dev_priv->power_domains;
6556
6557 mutex_lock(&power_domains->lock);
6558
6559 for_each_power_well(i, power_well, BIT(domain), power_domains) {
6560 if (!power_well->count++) {
6561 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
6562 power_well->ops->enable(dev_priv, power_well);
6563 power_well->hw_enabled = true;
6564 }
6565
6566 check_power_well_state(dev_priv, power_well);
6567 }
6568
6569 power_domains->domain_use_count[domain]++;
6570
6571 mutex_unlock(&power_domains->lock);
6572 }
6573
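/*
 * Drop a reference taken with intel_display_power_get(): wells whose
 * use count reaches zero are powered down (if i915.disable_power_well
 * allows it), and the runtime PM reference is released.
 */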
6574 void intel_display_power_put(struct drm_i915_private *dev_priv,
6575 enum intel_display_power_domain domain)
6576 {
6577 struct i915_power_domains *power_domains;
6578 struct i915_power_well *power_well;
6579 int i;
6580
6581 power_domains = &dev_priv->power_domains;
6582
6583 mutex_lock(&power_domains->lock);
6584
6585 WARN_ON(!power_domains->domain_use_count[domain]);
6586 power_domains->domain_use_count[domain]--;
6587
6588 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
6589 WARN_ON(!power_well->count);
6590
6591 if (!--power_well->count && i915.disable_power_well) {
6592 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
6593 power_well->hw_enabled = false;
6594 power_well->ops->disable(dev_priv, power_well);
6595 }
6596
6597 check_power_well_state(dev_priv, power_well);
6598 }
6599
6600 mutex_unlock(&power_domains->lock);
6601
6602 intel_runtime_pm_put(dev_priv);
6603 }
6604
6605 static struct i915_power_domains *hsw_pwr;
6606
6607 /* Display audio driver power well request */
6608 int i915_request_power_well(void)
6609 {
6610 struct drm_i915_private *dev_priv;
6611
6612 if (!hsw_pwr)
6613 return -ENODEV;
6614
6615 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6616 power_domains);
6617 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
6618 return 0;
6619 }
6620 EXPORT_SYMBOL_GPL(i915_request_power_well);
6621
6622 /* Display audio driver power well release */
6623 int i915_release_power_well(void)
6624 {
6625 struct drm_i915_private *dev_priv;
6626
6627 if (!hsw_pwr)
6628 return -ENODEV;
6629
6630 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6631 power_domains);
6632 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
6633 return 0;
6634 }
6635 EXPORT_SYMBOL_GPL(i915_release_power_well);
6636
6637 /*
6638 * Private interface for the audio driver to get CDCLK in kHz.
6639 *
6640 * Caller must request power well using i915_request_power_well() prior to
6641 * making the call.
6642 */
6643 int i915_get_cdclk_freq(void)
6644 {
6645 struct drm_i915_private *dev_priv;
6646
6647 if (!hsw_pwr)
6648 return -ENODEV;
6649
6650 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6651 power_domains);
6652
6653 return intel_ddi_get_cdclk_freq(dev_priv);
6654 }
6655 EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
6656
6657
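/*
 * Power domain bookkeeping: each power well advertises the domains it
 * services as a bitmask built from the defines below. On HSW/BDW the
 * display well covers everything outside the always-on set (plus
 * POWER_DOMAIN_INIT).
 */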
6658 #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
6659
6660 #define HSW_ALWAYS_ON_POWER_DOMAINS ( \
6661 BIT(POWER_DOMAIN_PIPE_A) | \
6662 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
6663 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
6664 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
6665 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6666 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6667 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6668 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6669 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6670 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6671 BIT(POWER_DOMAIN_PORT_CRT) | \
6672 BIT(POWER_DOMAIN_PLLS) | \
6673 BIT(POWER_DOMAIN_INIT))
6674 #define HSW_DISPLAY_POWER_DOMAINS ( \
6675 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
6676 BIT(POWER_DOMAIN_INIT))
6677
6678 #define BDW_ALWAYS_ON_POWER_DOMAINS ( \
6679 HSW_ALWAYS_ON_POWER_DOMAINS | \
6680 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
6681 #define BDW_DISPLAY_POWER_DOMAINS ( \
6682 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
6683 BIT(POWER_DOMAIN_INIT))
6684
6685 #define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
6686 #define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
6687
6688 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
6689 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6690 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6691 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6692 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6693 BIT(POWER_DOMAIN_PORT_CRT) | \
6694 BIT(POWER_DOMAIN_INIT))
6695
6696 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
6697 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6698 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6699 BIT(POWER_DOMAIN_INIT))
6700
6701 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
6702 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6703 BIT(POWER_DOMAIN_INIT))
6704
6705 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
6706 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6707 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6708 BIT(POWER_DOMAIN_INIT))
6709
6710 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
6711 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6712 BIT(POWER_DOMAIN_INIT))
6713
6714 #define CHV_PIPE_A_POWER_DOMAINS ( \
6715 BIT(POWER_DOMAIN_PIPE_A) | \
6716 BIT(POWER_DOMAIN_INIT))
6717
6718 #define CHV_PIPE_B_POWER_DOMAINS ( \
6719 BIT(POWER_DOMAIN_PIPE_B) | \
6720 BIT(POWER_DOMAIN_INIT))
6721
6722 #define CHV_PIPE_C_POWER_DOMAINS ( \
6723 BIT(POWER_DOMAIN_PIPE_C) | \
6724 BIT(POWER_DOMAIN_INIT))
6725
6726 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
6727 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6728 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6729 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6730 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6731 BIT(POWER_DOMAIN_INIT))
6732
6733 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
6734 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6735 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6736 BIT(POWER_DOMAIN_INIT))
6737
6738 #define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
6739 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6740 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6741 BIT(POWER_DOMAIN_INIT))
6742
6743 #define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
6744 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6745 BIT(POWER_DOMAIN_INIT))
6746
6747 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
6748 .sync_hw = i9xx_always_on_power_well_noop,
6749 .enable = i9xx_always_on_power_well_noop,
6750 .disable = i9xx_always_on_power_well_noop,
6751 .is_enabled = i9xx_always_on_power_well_enabled,
6752 };
6753
6754 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
6755 .sync_hw = chv_pipe_power_well_sync_hw,
6756 .enable = chv_pipe_power_well_enable,
6757 .disable = chv_pipe_power_well_disable,
6758 .is_enabled = chv_pipe_power_well_enabled,
6759 };
6760
6761 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
6762 .sync_hw = vlv_power_well_sync_hw,
6763 .enable = chv_dpio_cmn_power_well_enable,
6764 .disable = chv_dpio_cmn_power_well_disable,
6765 .is_enabled = vlv_power_well_enabled,
6766 };
6767
6768 static struct i915_power_well i9xx_always_on_power_well[] = {
6769 {
6770 .name = "always-on",
6771 .always_on = 1,
6772 .domains = POWER_DOMAIN_MASK,
6773 .ops = &i9xx_always_on_power_well_ops,
6774 },
6775 };
6776
6777 static const struct i915_power_well_ops hsw_power_well_ops = {
6778 .sync_hw = hsw_power_well_sync_hw,
6779 .enable = hsw_power_well_enable,
6780 .disable = hsw_power_well_disable,
6781 .is_enabled = hsw_power_well_enabled,
6782 };
6783
6784 static struct i915_power_well hsw_power_wells[] = {
6785 {
6786 .name = "always-on",
6787 .always_on = 1,
6788 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
6789 .ops = &i9xx_always_on_power_well_ops,
6790 },
6791 {
6792 .name = "display",
6793 .domains = HSW_DISPLAY_POWER_DOMAINS,
6794 .ops = &hsw_power_well_ops,
6795 },
6796 };
6797
6798 static struct i915_power_well bdw_power_wells[] = {
6799 {
6800 .name = "always-on",
6801 .always_on = 1,
6802 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
6803 .ops = &i9xx_always_on_power_well_ops,
6804 },
6805 {
6806 .name = "display",
6807 .domains = BDW_DISPLAY_POWER_DOMAINS,
6808 .ops = &hsw_power_well_ops,
6809 },
6810 };
6811
6812 static const struct i915_power_well_ops vlv_display_power_well_ops = {
6813 .sync_hw = vlv_power_well_sync_hw,
6814 .enable = vlv_display_power_well_enable,
6815 .disable = vlv_display_power_well_disable,
6816 .is_enabled = vlv_power_well_enabled,
6817 };
6818
6819 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
6820 .sync_hw = vlv_power_well_sync_hw,
6821 .enable = vlv_dpio_cmn_power_well_enable,
6822 .disable = vlv_dpio_cmn_power_well_disable,
6823 .is_enabled = vlv_power_well_enabled,
6824 };
6825
6826 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
6827 .sync_hw = vlv_power_well_sync_hw,
6828 .enable = vlv_power_well_enable,
6829 .disable = vlv_power_well_disable,
6830 .is_enabled = vlv_power_well_enabled,
6831 };
6832
6833 static struct i915_power_well vlv_power_wells[] = {
6834 {
6835 .name = "always-on",
6836 .always_on = 1,
6837 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
6838 .ops = &i9xx_always_on_power_well_ops,
6839 },
6840 {
6841 .name = "display",
6842 .domains = VLV_DISPLAY_POWER_DOMAINS,
6843 .data = PUNIT_POWER_WELL_DISP2D,
6844 .ops = &vlv_display_power_well_ops,
6845 },
6846 {
6847 .name = "dpio-tx-b-01",
6848 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6849 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6850 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6851 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6852 .ops = &vlv_dpio_power_well_ops,
6853 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
6854 },
6855 {
6856 .name = "dpio-tx-b-23",
6857 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6858 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6859 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6860 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6861 .ops = &vlv_dpio_power_well_ops,
6862 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
6863 },
6864 {
6865 .name = "dpio-tx-c-01",
6866 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6867 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6868 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6869 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6870 .ops = &vlv_dpio_power_well_ops,
6871 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
6872 },
6873 {
6874 .name = "dpio-tx-c-23",
6875 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6876 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6877 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6878 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6879 .ops = &vlv_dpio_power_well_ops,
6880 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
6881 },
6882 {
6883 .name = "dpio-common",
6884 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
6885 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6886 .ops = &vlv_dpio_cmn_power_well_ops,
6887 },
6888 };
6889
6890 static struct i915_power_well chv_power_wells[] = {
6891 {
6892 .name = "always-on",
6893 .always_on = 1,
6894 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
6895 .ops = &i9xx_always_on_power_well_ops,
6896 },
6897 #if 0
6898 {
6899 .name = "display",
6900 .domains = VLV_DISPLAY_POWER_DOMAINS,
6901 .data = PUNIT_POWER_WELL_DISP2D,
6902 .ops = &vlv_display_power_well_ops,
6903 },
6904 {
6905 .name = "pipe-a",
6906 .domains = CHV_PIPE_A_POWER_DOMAINS,
6907 .data = PIPE_A,
6908 .ops = &chv_pipe_power_well_ops,
6909 },
6910 {
6911 .name = "pipe-b",
6912 .domains = CHV_PIPE_B_POWER_DOMAINS,
6913 .data = PIPE_B,
6914 .ops = &chv_pipe_power_well_ops,
6915 },
6916 {
6917 .name = "pipe-c",
6918 .domains = CHV_PIPE_C_POWER_DOMAINS,
6919 .data = PIPE_C,
6920 .ops = &chv_pipe_power_well_ops,
6921 },
6922 #endif
6923 {
6924 .name = "dpio-common-bc",
6925 /*
6926 * XXX: cmnreset for one PHY seems to disturb the other.
6927 * As a workaround keep both powered on at the same
6928 * time for now.
6929 */
6930 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
6931 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6932 .ops = &chv_dpio_cmn_power_well_ops,
6933 },
6934 {
6935 .name = "dpio-common-d",
6936 /*
6937 * XXX: cmnreset for one PHY seems to disturb the other.
6938 * As a workaround keep both powered on at the same
6939 * time for now.
6940 */
6941 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
6942 .data = PUNIT_POWER_WELL_DPIO_CMN_D,
6943 .ops = &chv_dpio_cmn_power_well_ops,
6944 },
6945 #if 0
6946 {
6947 .name = "dpio-tx-b-01",
6948 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6949 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
6950 .ops = &vlv_dpio_power_well_ops,
6951 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
6952 },
6953 {
6954 .name = "dpio-tx-b-23",
6955 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6956 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
6957 .ops = &vlv_dpio_power_well_ops,
6958 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
6959 },
6960 {
6961 .name = "dpio-tx-c-01",
6962 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6963 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6964 .ops = &vlv_dpio_power_well_ops,
6965 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
6966 },
6967 {
6968 .name = "dpio-tx-c-23",
6969 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6970 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6971 .ops = &vlv_dpio_power_well_ops,
6972 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
6973 },
6974 {
6975 .name = "dpio-tx-d-01",
6976 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
6977 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
6978 .ops = &vlv_dpio_power_well_ops,
6979 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
6980 },
6981 {
6982 .name = "dpio-tx-d-23",
6983 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
6984 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
6985 .ops = &vlv_dpio_power_well_ops,
6986 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
6987 },
6988 #endif
6989 };
6990
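/*
 * Look up a power well by the platform-specific id stored in ->data;
 * returns NULL if no well matches.
 */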
6991 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
6992 enum punit_power_well power_well_id)
6993 {
6994 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6995 struct i915_power_well *power_well;
6996 int i;
6997
6998 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
6999 if (power_well->data == power_well_id)
7000 return power_well;
7001 }
7002
7003 return NULL;
7004 }
7005
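/*
 * Attach a platform's power well array to the power_domains struct;
 * kept as a macro so that ARRAY_SIZE() sees the real array type.
 */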
7006 #define set_power_wells(power_domains, __power_wells) ({ \
7007 (power_domains)->power_wells = (__power_wells); \
7008 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
7009 })
7010
7011 int intel_power_domains_init(struct drm_i915_private *dev_priv)
7012 {
7013 struct i915_power_domains *power_domains = &dev_priv->power_domains;
7014
7015 mutex_init(&power_domains->lock);
7016
7017 /*
7018 * The enabling order will be from lower to higher indexed wells,
7019 * the disabling order is reversed.
7020 */
7021 if (IS_HASWELL(dev_priv->dev)) {
7022 set_power_wells(power_domains, hsw_power_wells);
7023 hsw_pwr = power_domains;
7024 } else if (IS_BROADWELL(dev_priv->dev)) {
7025 set_power_wells(power_domains, bdw_power_wells);
7026 hsw_pwr = power_domains;
7027 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
7028 set_power_wells(power_domains, chv_power_wells);
7029 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
7030 set_power_wells(power_domains, vlv_power_wells);
7031 } else {
7032 set_power_wells(power_domains, i9xx_always_on_power_well);
7033 }
7034
7035 return 0;
7036 }
7037
7038 void intel_power_domains_remove(struct drm_i915_private *dev_priv)
7039 {
7040 hsw_pwr = NULL;
7041 }
7042
7043 static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
7044 {
7045 struct i915_power_domains *power_domains = &dev_priv->power_domains;
7046 struct i915_power_well *power_well;
7047 int i;
7048
7049 mutex_lock(&power_domains->lock);
7050 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
7051 power_well->ops->sync_hw(dev_priv, power_well);
7052 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
7053 power_well);
7054 }
7055 mutex_unlock(&power_domains->lock);
7056 }
7057
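/*
 * Workaround for a BIOS that leaves the DPIO common lane well powered
 * up without the PHY common reset properly deasserted: power the well
 * down here so the next enable performs the full reset sequence.
 */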
7058 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
7059 {
7060 struct i915_power_well *cmn =
7061 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
7062 struct i915_power_well *disp2d =
7063 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
7064
7065 /* nothing to do if common lane is already off */
7066 if (!cmn->ops->is_enabled(dev_priv, cmn))
7067 return;
7068
7069 /* If the display might be already active skip this */
7070 if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
7071 I915_READ(DPIO_CTL) & DPIO_CMNRST)
7072 return;
7073
7074 DRM_DEBUG_KMS("toggling display PHY side reset\n");
7075
7076 /* cmnlane needs DPLL registers */
7077 disp2d->ops->enable(dev_priv, disp2d);
7078
7079 /*
7080 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
7081 * Need to assert and de-assert PHY SB reset by gating the
7082 * common lane power, then un-gating it.
7083 * Simply ungating isn't enough to reset the PHY enough to get
7084 * ports and lanes running.
7085 */
7086 cmn->ops->disable(dev_priv, cmn);
7087 }
7088
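/*
 * Bring the power domain state machine into a known state at driver
 * load and resume: apply the VLV common lane workaround, force the
 * init power domain on, and sync software state with the hardware.
 */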
7089 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
7090 {
7091 struct drm_device *dev = dev_priv->dev;
7092 struct i915_power_domains *power_domains = &dev_priv->power_domains;
7093
7094 power_domains->initializing = true;
7095
7096 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
7097 mutex_lock(&power_domains->lock);
7098 vlv_cmnlane_wa(dev_priv);
7099 mutex_unlock(&power_domains->lock);
7100 }
7101
7102 /* For now, we need the power well to be always enabled. */
7103 intel_display_set_init_power(dev_priv, true);
7104 intel_power_domains_resume(dev_priv);
7105 power_domains->initializing = false;
7106 }
7107
7108 void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
7109 {
7110 intel_runtime_pm_get(dev_priv);
7111 }
7112
7113 void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
7114 {
7115 intel_runtime_pm_put(dev_priv);
7116 }
7117
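/*
 * Runtime PM wrappers: get/put bracket any access that needs the
 * device awake, and the suspended-state WARNs catch callers that got
 * the ordering wrong.
 */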
7118 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
7119 {
7120 struct drm_device *dev = dev_priv->dev;
7121 struct device *device = &dev->pdev->dev;
7122
7123 if (!HAS_RUNTIME_PM(dev))
7124 return;
7125
7126 pm_runtime_get_sync(device);
7127 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
7128 }
7129
7130 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
7131 {
7132 struct drm_device *dev = dev_priv->dev;
7133 struct device *device = &dev->pdev->dev;
7134
7135 if (!HAS_RUNTIME_PM(dev))
7136 return;
7137
7138 WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
7139 pm_runtime_get_noresume(device);
7140 }
7141
7142 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
7143 {
7144 struct drm_device *dev = dev_priv->dev;
7145 struct device *device = &dev->pdev->dev;
7146
7147 if (!HAS_RUNTIME_PM(dev))
7148 return;
7149
7150 pm_runtime_mark_last_busy(device);
7151 pm_runtime_put_autosuspend(device);
7152 }
7153
7154 void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
7155 {
7156 struct drm_device *dev = dev_priv->dev;
7157 struct device *device = &dev->pdev->dev;
7158
7159 if (!HAS_RUNTIME_PM(dev))
7160 return;
7161
7162 pm_runtime_set_active(device);
7163
7164 /*
7165 	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
7166 * requirement.
7167 */
7168 if (!intel_enable_rc6(dev)) {
7169 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
7170 return;
7171 }
7172
7173 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
7174 pm_runtime_mark_last_busy(device);
7175 pm_runtime_use_autosuspend(device);
7176
7177 pm_runtime_put_autosuspend(device);
7178 }
7179
7180 void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
7181 {
7182 struct drm_device *dev = dev_priv->dev;
7183 struct device *device = &dev->pdev->dev;
7184
7185 if (!HAS_RUNTIME_PM(dev))
7186 return;
7187
7188 if (!intel_enable_rc6(dev))
7189 return;
7190
7191 /* Make sure we're not suspended first. */
7192 pm_runtime_get_sync(device);
7193 pm_runtime_disable(device);
7194 }
7195
7196 /* Set up chip specific power management-related functions */
7197 void intel_init_pm(struct drm_device *dev)
7198 {
7199 struct drm_i915_private *dev_priv = dev->dev_private;
7200
7201 if (HAS_FBC(dev)) {
7202 if (INTEL_INFO(dev)->gen >= 7) {
7203 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
7204 dev_priv->display.enable_fbc = gen7_enable_fbc;
7205 dev_priv->display.disable_fbc = ironlake_disable_fbc;
7206 } else if (INTEL_INFO(dev)->gen >= 5) {
7207 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
7208 dev_priv->display.enable_fbc = ironlake_enable_fbc;
7209 dev_priv->display.disable_fbc = ironlake_disable_fbc;
7210 } else if (IS_GM45(dev)) {
7211 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
7212 dev_priv->display.enable_fbc = g4x_enable_fbc;
7213 dev_priv->display.disable_fbc = g4x_disable_fbc;
7214 } else {
7215 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
7216 dev_priv->display.enable_fbc = i8xx_enable_fbc;
7217 dev_priv->display.disable_fbc = i8xx_disable_fbc;
7218
7219 /* This value was pulled out of someone's hat */
7220 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
7221 }
7222 }
7223
7224 	/* For CxSR */
7225 if (IS_PINEVIEW(dev))
7226 i915_pineview_get_mem_freq(dev);
7227 else if (IS_GEN5(dev))
7228 i915_ironlake_get_mem_freq(dev);
7229
7230 /* For FIFO watermark updates */
7231 if (HAS_PCH_SPLIT(dev)) {
7232 ilk_setup_wm_latency(dev);
7233
7234 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
7235 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7236 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
7237 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7238 dev_priv->display.update_wm = ilk_update_wm;
7239 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
7240 } else {
7241 DRM_DEBUG_KMS("Failed to read display plane latency. "
7242 "Disable CxSR\n");
7243 }
7244
7245 if (IS_GEN5(dev))
7246 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
7247 else if (IS_GEN6(dev))
7248 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7249 else if (IS_IVYBRIDGE(dev))
7250 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
7251 else if (IS_HASWELL(dev))
7252 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
7253 else if (INTEL_INFO(dev)->gen == 8)
7254 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
7255 } else if (IS_CHERRYVIEW(dev)) {
7256 dev_priv->display.update_wm = cherryview_update_wm;
7257 dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
7258 dev_priv->display.init_clock_gating =
7259 cherryview_init_clock_gating;
7260 } else if (IS_VALLEYVIEW(dev)) {
7261 dev_priv->display.update_wm = valleyview_update_wm;
7262 dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
7263 dev_priv->display.init_clock_gating =
7264 valleyview_init_clock_gating;
7265 } else if (IS_PINEVIEW(dev)) {
7266 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
7267 dev_priv->is_ddr3,
7268 dev_priv->fsb_freq,
7269 dev_priv->mem_freq)) {
7270 DRM_INFO("failed to find known CxSR latency "
7271 "(found ddr%s fsb freq %d, mem freq %d), "
7272 "disabling CxSR\n",
7273 (dev_priv->is_ddr3 == 1) ? "3" : "2",
7274 dev_priv->fsb_freq, dev_priv->mem_freq);
7275 /* Disable CxSR and never update its watermark again */
7276 intel_set_memory_cxsr(dev_priv, false);
7277 dev_priv->display.update_wm = NULL;
7278 } else
7279 dev_priv->display.update_wm = pineview_update_wm;
7280 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7281 } else if (IS_G4X(dev)) {
7282 dev_priv->display.update_wm = g4x_update_wm;
7283 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7284 } else if (IS_GEN4(dev)) {
7285 dev_priv->display.update_wm = i965_update_wm;
7286 if (IS_CRESTLINE(dev))
7287 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
7288 else if (IS_BROADWATER(dev))
7289 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
7290 } else if (IS_GEN3(dev)) {
7291 dev_priv->display.update_wm = i9xx_update_wm;
7292 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7293 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7294 } else if (IS_GEN2(dev)) {
7295 if (INTEL_INFO(dev)->num_pipes == 1) {
7296 dev_priv->display.update_wm = i845_update_wm;
7297 dev_priv->display.get_fifo_size = i845_get_fifo_size;
7298 } else {
7299 dev_priv->display.update_wm = i9xx_update_wm;
7300 dev_priv->display.get_fifo_size = i830_get_fifo_size;
7301 }
7302
7303 if (IS_I85X(dev) || IS_I865G(dev))
7304 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7305 else
7306 dev_priv->display.init_clock_gating = i830_init_clock_gating;
7307 } else {
7308 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
7309 }
7310 }
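
/*
 * Dispatch sketch (illustrative): the hooks installed above are invoked
 * through NULL-checked wrappers (such as intel_update_watermarks())
 * rather than called directly, roughly
 *
 *	if (dev_priv->display.update_wm)
 *		dev_priv->display.update_wm(crtc);
 *
 * with crtc being whichever pipe is being updated. That is what makes
 * clearing update_wm in the Pineview CxSR failure path above a safe way
 * to disable watermark updates.
 */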
7311
7312 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
7313 {
7314 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7315
7316 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7317 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
7318 return -EAGAIN;
7319 }
7320
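	/*
	 * The request parameter goes in via the DATA register; the reply
	 * read back below overwrites it.
	 */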
7321 I915_WRITE(GEN6_PCODE_DATA, *val);
7322 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7323
7324 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
7325 500)) {
7326 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
7327 return -ETIMEDOUT;
7328 }
7329
7330 *val = I915_READ(GEN6_PCODE_DATA);
7331 I915_WRITE(GEN6_PCODE_DATA, 0);
7332
7333 return 0;
7334 }
7335
7336 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
7337 {
7338 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7339
7340 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7341 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
7342 return -EAGAIN;
7343 }
7344
7345 I915_WRITE(GEN6_PCODE_DATA, val);
7346 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7347
7348 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
7349 500)) {
7350 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
7351 return -ETIMEDOUT;
7352 }
7353
7354 I915_WRITE(GEN6_PCODE_DATA, 0);
7355
7356 return 0;
7357 }
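
/*
 * Usage sketch (illustrative, not a call site in this file): pcode
 * accesses must be serialized by rps.hw_lock, which both helpers assert:
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = sandybridge_pcode_write(dev_priv, mbox, val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *
 * where mbox is one of the GEN6_PCODE_* mailbox commands. -EAGAIN means
 * the mailbox was still busy and the access may be retried.
 */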
7358
7359 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
7360 {
7361 int div;
7362
7363 /* 4 x czclk */
7364 switch (dev_priv->mem_freq) {
7365 case 800:
7366 div = 10;
7367 break;
7368 case 1066:
7369 div = 12;
7370 break;
7371 case 1333:
7372 div = 16;
7373 break;
7374 default:
7375 return -1;
7376 }
7377
7378 return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
7379 }
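
/*
 * Worked example (values picked for illustration): with mem_freq 1066
 * we get div = 12, so the opcode 0xc0 (192) decodes as
 *
 *	DIV_ROUND_CLOSEST(1066 * (192 + 6 - 0xbd), 4 * 12)
 *	= DIV_ROUND_CLOSEST(1066 * 9, 48) = 200 MHz.
 */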
7380
7381 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
7382 {
7383 int mul;
7384
7385 /* 4 x czclk */
7386 switch (dev_priv->mem_freq) {
7387 case 800:
7388 mul = 10;
7389 break;
7390 case 1066:
7391 mul = 12;
7392 break;
7393 case 1333:
7394 mul = 16;
7395 break;
7396 default:
7397 return -1;
7398 }
7399
7400 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
7401 }
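
/*
 * This is the inverse of byt_gpu_freq(). Continuing the example above,
 * 200 MHz at mem_freq 1066 (mul = 12) encodes back to
 *
 *	DIV_ROUND_CLOSEST(4 * 12 * 200, 1066) + 0xbd - 6 = 9 + 183 = 0xc0,
 *
 * so the two conversions round-trip for this value.
 */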
7402
7403 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7404 {
7405 int div, freq;
7406
7407 switch (dev_priv->rps.cz_freq) {
7408 case 200:
7409 div = 5;
7410 break;
7411 case 267:
7412 div = 6;
7413 break;
7414 case 320:
7415 case 333:
7416 case 400:
7417 div = 8;
7418 break;
7419 default:
7420 return -1;
7421 }
7422
7423 freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
7424
7425 return freq;
7426 }
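
/*
 * Worked example (values picked for illustration): with cz_freq 320 we
 * get div = 8, so opcode 60 decodes as
 *
 *	DIV_ROUND_CLOSEST(320 * 60, 2 * 8) / 2 = 1200 / 2 = 600 MHz.
 */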
7427
7428 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7429 {
7430 int mul, opcode;
7431
7432 switch (dev_priv->rps.cz_freq) {
7433 case 200:
7434 mul = 5;
7435 break;
7436 case 267:
7437 mul = 6;
7438 break;
7439 case 320:
7440 case 333:
7441 case 400:
7442 mul = 8;
7443 break;
7444 default:
7445 return -1;
7446 }
7447
7448 /* CHV needs even values */
7449 opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
7450
7451 return opcode;
7452 }
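
/*
 * Inverse of chv_gpu_freq(). Continuing the example above, 600 MHz at
 * cz_freq 320 (mul = 8) encodes back to
 *
 *	DIV_ROUND_CLOSEST(600 * 2 * 8, 320) * 2 = 30 * 2 = 60,
 *
 * with the trailing "* 2" enforcing the even value CHV requires.
 */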
7453
7454 int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7455 {
7456 int ret = -1;
7457
7458 if (IS_CHERRYVIEW(dev_priv->dev))
7459 ret = chv_gpu_freq(dev_priv, val);
7460 else if (IS_VALLEYVIEW(dev_priv->dev))
7461 ret = byt_gpu_freq(dev_priv, val);
7462
7463 return ret;
7464 }
7465
7466 int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7467 {
7468 int ret = -1;
7469
7470 if (IS_CHERRYVIEW(dev_priv->dev))
7471 ret = chv_freq_opcode(dev_priv, val);
7472 else if (IS_VALLEYVIEW(dev_priv->dev))
7473 ret = byt_freq_opcode(dev_priv, val);
7474
7475 return ret;
7476 }
7477
7478 void intel_pm_setup(struct drm_device *dev)
7479 {
7480 struct drm_i915_private *dev_priv = dev->dev_private;
7481
7482 mutex_init(&dev_priv->rps.hw_lock);
7483
7484 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
7485 intel_gen6_powersave_work);
7486
7487 dev_priv->pm.suspended = false;
7488 dev_priv->pm._irqs_disabled = false;
7489 }