/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	     "Device suspended\n");
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}
static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}
static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
						GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
						int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL) == 0,
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	if (!IS_CHERRYVIEW(dev_priv->dev))
		__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
	if (!IS_CHERRYVIEW(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}
void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}
/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}
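/*
 * A minimal usage sketch of the get/put pair described above, assuming a
 * caller that wants to read a couple of GT-domain registers without letting
 * the GT power down between the reads.  The two registers below are only
 * placeholders for "some GT registers"; the block is guarded out and is not
 * part of the driver proper.
 */
#if 0
static void example_read_gt_registers(struct drm_i915_private *dev_priv)
{
	u32 req, stat;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
	req = I915_READ(GEN6_RPNSWREQ);		/* placeholder GT register */
	stat = I915_READ(GEN6_RPSTAT1);		/* placeholder GT register */
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	DRM_DEBUG_DRIVER("example GT reads: 0x%08x 0x%08x\n", req, stat);
}
#endif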
void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xC000) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xC000, 0xC800) || \
	 REG_RANGE((reg), 0xF000, 0x10000) || \
	 REG_RANGE((reg), 0x14000, 0x14400) || \
	 REG_RANGE((reg), 0x22000, 0x24000))
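/*
 * Illustrative sketch of how the range macros above classify a register
 * offset into a forcewake well, here assuming the VLV layout: for example,
 * 0x2358 falls in the 0x2000-0x4000 render range and 0x12080 in the
 * 0x12000-0x14000 media range.  Guarded out; the real users are the
 * vlv_read/chv_read/chv_write accessors below.
 */
#if 0
static unsigned example_classify_vlv_offset(u32 reg)
{
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg))
		return FORCEWAKE_RENDER;
	if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg))
		return FORCEWAKE_MEDIA;
	return 0;	/* no forcewake well needed */
}
#endif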
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}
static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	if (i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	REG_READ_FOOTER; \
}
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine |= FORCEWAKE_RENDER; \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine |= FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

#undef REG_READ_FOOTER
#undef REG_READ_HEADER
#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}
static const u32 gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	unsigned fwengine = 0; \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_WRITE_FOOTER; \
}

#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	intel_uncore_early_sanitize(dev, false);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}
	switch (INTEL_INFO(dev)->gen) {
	default:
		if (IS_CHERRYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = chv_write8;
			dev_priv->uncore.funcs.mmio_writew = chv_write16;
			dev_priv->uncore.funcs.mmio_writel = chv_write32;
			dev_priv->uncore.funcs.mmio_writeq = chv_write64;
			dev_priv->uncore.funcs.mmio_readb = chv_read8;
			dev_priv->uncore.funcs.mmio_readw = chv_read16;
			dev_priv->uncore.funcs.mmio_readl = chv_read32;
			dev_priv->uncore.funcs.mmio_readq = chv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
			dev_priv->uncore.funcs.mmio_writew = gen8_write16;
			dev_priv->uncore.funcs.mmio_writel = gen8_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq = vlv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq = gen4_read64;
		break;
	}
}
void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}
#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
};
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
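/*
 * Hypothetical userspace-side sketch (not part of this file) of how the
 * whitelisted read path above is typically exercised: 0x2358 is
 * RING_TIMESTAMP(RENDER_RING_BASE), the only entry in the whitelist.  The
 * drm_fd parameter is assumed to be an open render-node or card file
 * descriptor.  Guarded out so it never builds as kernel code.
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_read_render_timestamp(int drm_fd, unsigned long long *ts)
{
	struct drm_i915_reg_read rr = {
		.offset = 0x2358,	/* RING_TIMESTAMP(RENDER_RING_BASE) */
	};
	int ret = ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &rr);

	if (ret == 0)
		*ts = rr.val;	/* 36-bit timestamp, width per entry->size */
	return ret;
}
#endif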
int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/* FIXME: i965g/gm need a display save/restore for gpu reset. */

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}
static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}
static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}
int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_GEN4(dev))
		return i965_do_reset(dev);
	else
		return -ENODEV;
}
void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}