drm/i915/skl: Gen9 multi-engine forcewake
drivers/gpu/drm/i915/intel_uncore.c

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
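
/*
 * Usage sketch (illustrative, not a quote from any caller): the __raw_*
 * accessors above are plain MMIO and do no forcewake or runtime-pm
 * bookkeeping, e.g.
 *
 *	u32 free = __raw_i915_read32(dev_priv, GTFIFOCTL) &
 *		   GT_FIFO_FREE_ENTRIES_MASK;
 *
 * assumes the caller already holds whatever forcewake it needs. Everything
 * else should go through the genN read/write helpers defined further down.
 */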
42
b2ec142c
PZ
43static void
44assert_device_not_suspended(struct drm_i915_private *dev_priv)
45{
46 WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
47 "Device suspended\n");
48}
6af5d92f 49
907b28c5
CW
50static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
51{
907b28c5
CW
52 /* w/a for a sporadic read returning 0 by waiting for the GT
53 * thread to wake up.
54 */
eb88bd1b
VS
55 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
56 GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
907b28c5
CW
57 DRM_ERROR("GT thread status wait timed out\n");
58}
59
60static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
61{
6af5d92f
CW
62 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
63 /* something from same cacheline, but !FORCEWAKE */
64 __raw_posting_read(dev_priv, ECOBUS);
907b28c5
CW
65}
66
c8d9a590
D
67static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
68 int fw_engine)
907b28c5 69{
6af5d92f 70 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
907b28c5
CW
71 FORCEWAKE_ACK_TIMEOUT_MS))
72 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
73
6af5d92f
CW
74 __raw_i915_write32(dev_priv, FORCEWAKE, 1);
75 /* something from same cacheline, but !FORCEWAKE */
76 __raw_posting_read(dev_priv, ECOBUS);
907b28c5 77
6af5d92f 78 if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
907b28c5
CW
79 FORCEWAKE_ACK_TIMEOUT_MS))
80 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
81
82 /* WaRsForcewakeWaitTC0:snb */
83 __gen6_gt_wait_for_thread_c0(dev_priv);
84}
85
6a68735a 86static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
907b28c5 87{
6af5d92f 88 __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
907b28c5 89 /* something from same cacheline, but !FORCEWAKE_MT */
6af5d92f 90 __raw_posting_read(dev_priv, ECOBUS);
907b28c5
CW
91}
92
6a68735a 93static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
c8d9a590 94 int fw_engine)
907b28c5
CW
95{
96 u32 forcewake_ack;
97
f98cd096 98 if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
907b28c5
CW
99 forcewake_ack = FORCEWAKE_ACK_HSW;
100 else
101 forcewake_ack = FORCEWAKE_MT_ACK;
102
6af5d92f 103 if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
907b28c5
CW
104 FORCEWAKE_ACK_TIMEOUT_MS))
105 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
106
6af5d92f
CW
107 __raw_i915_write32(dev_priv, FORCEWAKE_MT,
108 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
907b28c5 109 /* something from same cacheline, but !FORCEWAKE_MT */
6af5d92f 110 __raw_posting_read(dev_priv, ECOBUS);
907b28c5 111
6af5d92f 112 if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
907b28c5
CW
113 FORCEWAKE_ACK_TIMEOUT_MS))
114 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
115
116 /* WaRsForcewakeWaitTC0:ivb,hsw */
c549f738 117 __gen6_gt_wait_for_thread_c0(dev_priv);
907b28c5
CW
118}
119
120static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
121{
122 u32 gtfifodbg;
6af5d92f
CW
123
124 gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
90f256b5
VS
125 if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
126 __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
907b28c5
CW
127}
128
c8d9a590
D
129static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
130 int fw_engine)
907b28c5 131{
6af5d92f 132 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
907b28c5 133 /* something from same cacheline, but !FORCEWAKE */
6af5d92f 134 __raw_posting_read(dev_priv, ECOBUS);
907b28c5
CW
135 gen6_gt_check_fifodbg(dev_priv);
136}
137
6a68735a 138static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
c8d9a590 139 int fw_engine)
907b28c5 140{
6af5d92f
CW
141 __raw_i915_write32(dev_priv, FORCEWAKE_MT,
142 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
907b28c5 143 /* something from same cacheline, but !FORCEWAKE_MT */
6af5d92f 144 __raw_posting_read(dev_priv, ECOBUS);
6a68735a
MK
145
146 if (IS_GEN7(dev_priv->dev))
147 gen6_gt_check_fifodbg(dev_priv);
907b28c5
CW
148}
149
150static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
151{
152 int ret = 0;
153
5135d64b
D
154 /* On VLV, FIFO will be shared by both SW and HW.
155 * So, we need to read the FREE_ENTRIES everytime */
156 if (IS_VALLEYVIEW(dev_priv->dev))
157 dev_priv->uncore.fifo_count =
158 __raw_i915_read32(dev_priv, GTFIFOCTL) &
159 GT_FIFO_FREE_ENTRIES_MASK;
160
907b28c5
CW
161 if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
162 int loop = 500;
46520e2b 163 u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
907b28c5
CW
164 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
165 udelay(10);
46520e2b 166 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
907b28c5
CW
167 }
168 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
169 ++ret;
170 dev_priv->uncore.fifo_count = fifo;
171 }
172 dev_priv->uncore.fifo_count--;
173
174 return ret;
175}
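
/*
 * Worked sketch (illustrative): on gen6/gen7 every posted write consumes a
 * GT FIFO entry, so a write path reserves an entry first and checks
 * GTFIFODBG afterwards if the FIFO ran low:
 *
 *	if (NEEDS_FORCE_WAKE(dev_priv, reg))
 *		fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);
 *	__raw_i915_write32(dev_priv, reg, val);
 *	if (unlikely(fifo_ret))
 *		gen6_gt_check_fifodbg(dev_priv);
 *
 * This mirrors the __gen6_write() macro further down; fifo_ret, reg and
 * val are local names used only in this sketch.
 */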

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
	if (!IS_CHERRYVIEW(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));

	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));

	__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));
}

static void
__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_RENDER_GEN9) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_RENDER_GEN9) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_GEN9) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_GEN9) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Media to ack.\n");
	}

	/* Check for Blitter Engine */
	if (FORCEWAKE_BLITTER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_BLITTER_GEN9) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_BLITTER_GEN9) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
	}
}

static void
__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Blitter Engine */
	if (FORCEWAKE_BLITTER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
}

static void
gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (FORCEWAKE_RENDER & fw_engine) {
		if (dev_priv->uncore.fw_rendercount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							      FORCEWAKE_RENDER);
	}

	if (FORCEWAKE_MEDIA & fw_engine) {
		if (dev_priv->uncore.fw_mediacount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							      FORCEWAKE_MEDIA);
	}

	if (FORCEWAKE_BLITTER & fw_engine) {
		if (dev_priv->uncore.fw_blittercount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							      FORCEWAKE_BLITTER);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void
gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (FORCEWAKE_RENDER & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_rendercount == 0);
		if (--dev_priv->uncore.fw_rendercount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							      FORCEWAKE_RENDER);
	}

	if (FORCEWAKE_MEDIA & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_mediacount == 0);
		if (--dev_priv->uncore.fw_mediacount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							      FORCEWAKE_MEDIA);
	}

	if (FORCEWAKE_BLITTER & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_blittercount == 0);
		if (--dev_priv->uncore.fw_blittercount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							      FORCEWAKE_BLITTER);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (IS_GEN9(dev))
		__gen9_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else if (IS_GEN9(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;

			if (dev_priv->uncore.fw_blittercount)
				fw |= FORCEWAKE_BLITTER;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
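/*
 * Usage sketch (illustrative):
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	... register sequence that must not race with GT power-down ...
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 */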
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev))
		return gen9_force_wake_get(dev_priv, fw_engine);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev)) {
		gen9_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xC000) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xC000, 0xC800) || \
	 REG_RANGE((reg), 0xF000, 0x10000) || \
	 REG_RANGE((reg), 0x14000, 0x14400) || \
	 REG_RANGE((reg), 0x22000, 0x24000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xC00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
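
/*
 * Worked example (illustrative): on gen9 a read of offset 0x2358 -
 * RING_TIMESTAMP(RENDER_RING_BASE), assuming the usual 0x2000 render
 * ring base - falls in the 0x2000-0x4000 render range above, so
 * gen9_read32() below takes FORCEWAKE_RENDER for it; an offset such as
 * 0x1000 sits in the 0xC00-0x2000 uncore range and needs no forcewake
 * at all.
 */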

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	if (i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed register detected. Please use i915.mmio_debug=1 to debug this problem.");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine |= FORCEWAKE_RENDER; \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine |= FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} else { \
		unsigned fwengine = 0; \
		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} else { \
			if (dev_priv->uncore.fw_blittercount == 0) \
				fwengine = FORCEWAKE_BLITTER; \
		} \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	} \
	REG_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}
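
/*
 * Note (hedged sketch): the registers in gen8_shadowed_regs are shadowed,
 * so writing them is safe without waking the GT; that is why
 * gen8_write##x() below skips the forcewake dance for, e.g., a ring tail
 * bump:
 *
 *	__raw_i915_write32(dev_priv, RING_TAIL(RENDER_RING_BASE), tail);
 *
 * (tail is a placeholder value). Any other register below 0x40000 still
 * takes FORCEWAKE_ALL around the write.
 */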
979
980#define __gen8_write(x) \
981static void \
982gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
ab2aa47e 983 REG_WRITE_HEADER; \
66bc2cab 984 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
e9dbd2b2
MK
985 if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
986 if (dev_priv->uncore.forcewake_count == 0) \
987 dev_priv->uncore.funcs.force_wake_get(dev_priv, \
988 FORCEWAKE_ALL); \
989 __raw_i915_write##x(dev_priv, reg, val); \
990 if (dev_priv->uncore.forcewake_count == 0) \
991 dev_priv->uncore.funcs.force_wake_put(dev_priv, \
992 FORCEWAKE_ALL); \
993 } else { \
994 __raw_i915_write##x(dev_priv, reg, val); \
ab2aa47e 995 } \
66bc2cab
PZ
996 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
997 hsw_unclaimed_reg_detect(dev_priv); \
0d965301 998 REG_WRITE_FOOTER; \
ab2aa47e
BW
999}
1000
1938e59a
D
1001#define __chv_write(x) \
1002static void \
1003chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
1004 unsigned fwengine = 0; \
1005 bool shadowed = is_gen8_shadowed(dev_priv, reg); \
1006 REG_WRITE_HEADER; \
1007 if (!shadowed) { \
1008 if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
1009 if (dev_priv->uncore.fw_rendercount == 0) \
1010 fwengine = FORCEWAKE_RENDER; \
1011 } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
1012 if (dev_priv->uncore.fw_mediacount == 0) \
1013 fwengine = FORCEWAKE_MEDIA; \
1014 } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
1015 if (dev_priv->uncore.fw_rendercount == 0) \
1016 fwengine |= FORCEWAKE_RENDER; \
1017 if (dev_priv->uncore.fw_mediacount == 0) \
1018 fwengine |= FORCEWAKE_MEDIA; \
1019 } \
1020 } \
1021 if (fwengine) \
1022 dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
1023 __raw_i915_write##x(dev_priv, reg, val); \
1024 if (fwengine) \
1025 dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
1026 REG_WRITE_FOOTER; \
1027}
1028
4597a88a
ZW
1029#define __gen9_write(x) \
1030static void \
1031gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
1032 bool trace) { \
1033 REG_WRITE_HEADER; \
1034 if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1035 __raw_i915_write##x(dev_priv, reg, val); \
1036 } else { \
1037 unsigned fwengine = 0; \
1038 if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
1039 if (dev_priv->uncore.fw_rendercount == 0) \
1040 fwengine = FORCEWAKE_RENDER; \
1041 } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
1042 if (dev_priv->uncore.fw_mediacount == 0) \
1043 fwengine = FORCEWAKE_MEDIA; \
1044 } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
1045 if (dev_priv->uncore.fw_rendercount == 0) \
1046 fwengine |= FORCEWAKE_RENDER; \
1047 if (dev_priv->uncore.fw_mediacount == 0) \
1048 fwengine |= FORCEWAKE_MEDIA; \
1049 } else { \
1050 if (dev_priv->uncore.fw_blittercount == 0) \
1051 fwengine = FORCEWAKE_BLITTER; \
1052 } \
1053 if (fwengine) \
1054 dev_priv->uncore.funcs.force_wake_get(dev_priv, \
1055 fwengine); \
1056 __raw_i915_write##x(dev_priv, reg, val); \
1057 if (fwengine) \
1058 dev_priv->uncore.funcs.force_wake_put(dev_priv, \
1059 fwengine); \
1060 } \
1061 REG_WRITE_FOOTER; \
1062}
1063
1064__gen9_write(8)
1065__gen9_write(16)
1066__gen9_write(32)
1067__gen9_write(64)
1938e59a
D
1068__chv_write(8)
1069__chv_write(16)
1070__chv_write(32)
1071__chv_write(64)
ab2aa47e
BW
1072__gen8_write(8)
1073__gen8_write(16)
1074__gen8_write(32)
1075__gen8_write(64)
4032ef43
BW
1076__hsw_write(8)
1077__hsw_write(16)
1078__hsw_write(32)
1079__hsw_write(64)
1080__gen6_write(8)
1081__gen6_write(16)
1082__gen6_write(32)
1083__gen6_write(64)
1084__gen5_write(8)
1085__gen5_write(16)
1086__gen5_write(32)
1087__gen5_write(64)
1088__gen4_write(8)
1089__gen4_write(16)
1090__gen4_write(32)
1091__gen4_write(64)
1092
4597a88a 1093#undef __gen9_write
1938e59a 1094#undef __chv_write
ab2aa47e 1095#undef __gen8_write
4032ef43
BW
1096#undef __hsw_write
1097#undef __gen6_write
1098#undef __gen5_write
1099#undef __gen4_write
0d965301 1100#undef REG_WRITE_FOOTER
5d738795 1101#undef REG_WRITE_HEADER
907b28c5 1102
43d942a7
YZ
1103#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
1104do { \
1105 dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
1106 dev_priv->uncore.funcs.mmio_writew = x##_write16; \
1107 dev_priv->uncore.funcs.mmio_writel = x##_write32; \
1108 dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
1109} while (0)
1110
1111#define ASSIGN_READ_MMIO_VFUNCS(x) \
1112do { \
1113 dev_priv->uncore.funcs.mmio_readb = x##_read8; \
1114 dev_priv->uncore.funcs.mmio_readw = x##_read16; \
1115 dev_priv->uncore.funcs.mmio_readl = x##_read32; \
1116 dev_priv->uncore.funcs.mmio_readq = x##_read64; \
1117} while (0)
1118
0b274481
BW
1119void intel_uncore_init(struct drm_device *dev)
1120{
1121 struct drm_i915_private *dev_priv = dev->dev_private;
1122
8232644c
CW
1123 setup_timer(&dev_priv->uncore.force_wake_timer,
1124 gen6_force_wake_timer, (unsigned long)dev_priv);
0b274481 1125
ed493883 1126 __intel_uncore_early_sanitize(dev, false);
05efeebd 1127
38cff0b1
ZW
1128 if (IS_GEN9(dev)) {
1129 dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
1130 dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
1131 } else if (IS_VALLEYVIEW(dev)) {
940aece4
D
1132 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
1133 dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
f98cd096 1134 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6a68735a
MK
1135 dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
1136 dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
0b274481
BW
1137 } else if (IS_IVYBRIDGE(dev)) {
1138 u32 ecobus;
1139
1140 /* IVB configs may use multi-threaded forcewake */
1141
1142 /* A small trick here - if the bios hasn't configured
1143 * MT forcewake, and if the device is in RC6, then
1144 * force_wake_mt_get will not wake the device and the
1145 * ECOBUS read will return zero. Which will be
1146 * (correctly) interpreted by the test below as MT
1147 * forcewake being disabled.
1148 */
1149 mutex_lock(&dev->struct_mutex);
6a68735a 1150 __gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
0b274481 1151 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
6a68735a 1152 __gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
0b274481
BW
1153 mutex_unlock(&dev->struct_mutex);
1154
1155 if (ecobus & FORCEWAKE_MT_ENABLE) {
1156 dev_priv->uncore.funcs.force_wake_get =
6a68735a 1157 __gen7_gt_force_wake_mt_get;
0b274481 1158 dev_priv->uncore.funcs.force_wake_put =
6a68735a 1159 __gen7_gt_force_wake_mt_put;
0b274481
BW
1160 } else {
1161 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1162 DRM_INFO("when using vblank-synced partial screen updates.\n");
1163 dev_priv->uncore.funcs.force_wake_get =
1164 __gen6_gt_force_wake_get;
1165 dev_priv->uncore.funcs.force_wake_put =
1166 __gen6_gt_force_wake_put;
1167 }
1168 } else if (IS_GEN6(dev)) {
1169 dev_priv->uncore.funcs.force_wake_get =
1170 __gen6_gt_force_wake_get;
1171 dev_priv->uncore.funcs.force_wake_put =
1172 __gen6_gt_force_wake_put;
1173 }
1174
3967018e 1175 switch (INTEL_INFO(dev)->gen) {
ab2aa47e 1176 default:
4597a88a
ZW
1177 WARN_ON(1);
1178 return;
1179 case 9:
1180 ASSIGN_WRITE_MMIO_VFUNCS(gen9);
1181 ASSIGN_READ_MMIO_VFUNCS(gen9);
1182 break;
1183 case 8:
1938e59a 1184 if (IS_CHERRYVIEW(dev)) {
43d942a7
YZ
1185 ASSIGN_WRITE_MMIO_VFUNCS(chv);
1186 ASSIGN_READ_MMIO_VFUNCS(chv);
1938e59a
D
1187
1188 } else {
43d942a7
YZ
1189 ASSIGN_WRITE_MMIO_VFUNCS(gen8);
1190 ASSIGN_READ_MMIO_VFUNCS(gen6);
1938e59a 1191 }
ab2aa47e 1192 break;
3967018e
BW
1193 case 7:
1194 case 6:
4032ef43 1195 if (IS_HASWELL(dev)) {
43d942a7 1196 ASSIGN_WRITE_MMIO_VFUNCS(hsw);
4032ef43 1197 } else {
43d942a7 1198 ASSIGN_WRITE_MMIO_VFUNCS(gen6);
4032ef43 1199 }
940aece4
D
1200
1201 if (IS_VALLEYVIEW(dev)) {
43d942a7 1202 ASSIGN_READ_MMIO_VFUNCS(vlv);
940aece4 1203 } else {
43d942a7 1204 ASSIGN_READ_MMIO_VFUNCS(gen6);
940aece4 1205 }
3967018e
BW
1206 break;
1207 case 5:
43d942a7
YZ
1208 ASSIGN_WRITE_MMIO_VFUNCS(gen5);
1209 ASSIGN_READ_MMIO_VFUNCS(gen5);
3967018e
BW
1210 break;
1211 case 4:
1212 case 3:
1213 case 2:
43d942a7
YZ
1214 ASSIGN_WRITE_MMIO_VFUNCS(gen4);
1215 ASSIGN_READ_MMIO_VFUNCS(gen4);
3967018e
BW
1216 break;
1217 }
ed493883
ID
1218
1219 i915_check_and_clear_faults(dev);
0b274481 1220}
43d942a7
YZ
1221#undef ASSIGN_WRITE_MMIO_VFUNCS
1222#undef ASSIGN_READ_MMIO_VFUNCS
0b274481
BW
1223
1224void intel_uncore_fini(struct drm_device *dev)
1225{
0b274481
BW
1226 /* Paranoia: make sure we have disabled everything before we exit. */
1227 intel_uncore_sanitize(dev);
0294ae7b 1228 intel_uncore_forcewake_reset(dev, false);
0b274481
BW
1229}
1230
af76ae44
DL
1231#define GEN_RANGE(l, h) GENMASK(h, l)
1232
907b28c5
CW
1233static const struct register_whitelist {
1234 uint64_t offset;
1235 uint32_t size;
af76ae44
DL
1236 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1237 uint32_t gen_bitmask;
907b28c5 1238} whitelist[] = {
c3f59a67 1239 { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
907b28c5
CW
1240};
1241
1242int i915_reg_read_ioctl(struct drm_device *dev,
1243 void *data, struct drm_file *file)
1244{
1245 struct drm_i915_private *dev_priv = dev->dev_private;
1246 struct drm_i915_reg_read *reg = data;
1247 struct register_whitelist const *entry = whitelist;
cf67c70f 1248 int i, ret = 0;
907b28c5
CW
1249
1250 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1251 if (entry->offset == reg->offset &&
1252 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1253 break;
1254 }
1255
1256 if (i == ARRAY_SIZE(whitelist))
1257 return -EINVAL;
1258
cf67c70f
PZ
1259 intel_runtime_pm_get(dev_priv);
1260
907b28c5
CW
1261 switch (entry->size) {
1262 case 8:
1263 reg->val = I915_READ64(reg->offset);
1264 break;
1265 case 4:
1266 reg->val = I915_READ(reg->offset);
1267 break;
1268 case 2:
1269 reg->val = I915_READ16(reg->offset);
1270 break;
1271 case 1:
1272 reg->val = I915_READ8(reg->offset);
1273 break;
1274 default:
1275 WARN_ON(1);
cf67c70f
PZ
1276 ret = -EINVAL;
1277 goto out;
907b28c5
CW
1278 }
1279
cf67c70f
PZ
1280out:
1281 intel_runtime_pm_put(dev_priv);
1282 return ret;
907b28c5
CW
1283}
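
/*
 * Userspace usage sketch (illustrative; struct and ioctl names are from
 * the i915 uapi, the offset is an assumption based on the whitelist
 * above):
 *
 *	struct drm_i915_reg_read reg = { .offset = 0x2358 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
 *		use(reg.val);
 *
 * 0x2358 is RING_TIMESTAMP(RENDER_RING_BASE) with the usual 0x2000 ring
 * base - the only whitelisted entry here; any other offset fails with
 * -EINVAL.
 */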

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/* FIXME: i965g/gm need a display save/restore for gpu reset. */
	return -ENODEV;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_GEN4(dev))
		return i965_do_reset(dev);
	else
		return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}