/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "i915_drv.h"
#include "intel_drv.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 2
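/*
 * The __raw_i915_read/write helpers below access the MMIO BAR directly; they
 * bypass the forcewake handling, tracing and locking done by the public
 * I915_READ/I915_WRITE paths and are only safe when the caller has already
 * taken care of that state itself.
 */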
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	     "Device suspended\n");
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}
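/*
 * Gen6 single-threaded forcewake: wait for any stale ack to clear, assert the
 * FORCEWAKE request, wait for the hardware to ack it, then wait for the GT
 * thread to leave its power-save state (WaRsForcewakeWaitTC0).
 */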
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}
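/*
 * IVB+ multi-threaded forcewake: FORCEWAKE_MT uses masked bits so several
 * agents (kernel, BIOS, ...) can each own a request bit; the kernel only
 * toggles FORCEWAKE_KERNEL and polls the per-platform ack register.
 */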
static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}
static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
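/*
 * Gen6/7 GT register writes go through a FIFO with a limited number of free
 * entries; wait until the free-entry count rises above the reserved
 * threshold before posting another write, returning non-zero on timeout.
 */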
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
						GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
						int fw_engine)
{
	/*
	 * WaRsDontPollForAckOnClearingFWBits:vlv
	 * Hardware clears ack bits lazily (only when all ack
	 * bits become 0) so don't poll for individual ack
	 * bits to be clear here like on other platforms.
	 */

	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						FORCEWAKE_ACK_MEDIA_VLV) &
						FORCEWAKE_KERNEL),
					FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	if (!IS_CHERRYVIEW(dev_priv->dev))
		__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
						int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
	if (!IS_CHERRYVIEW(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
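/*
 * VLV keeps separate render and media forcewake reference counts; only the
 * 0 -> 1 and 1 -> 0 transitions of each count actually touch the hardware.
 */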
static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
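/*
 * Timer callback that releases the forcewake reference deferred by
 * gen6_gt_force_wake_put(): drop the count under the uncore lock and release
 * the runtime-pm reference taken with it.
 */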
static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}
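/*
 * Flush any pending deferred put, reset the hardware forcewake state for the
 * platform and, if requested, re-assert forcewake to match the software
 * reference counts (used across GPU reset and resume).
 */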
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
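/*
 * Early MMIO sanitization: clear any stale unclaimed-register and GT FIFO
 * error flags, detect eLLC on HSW/BDW and reset forcewake before anything
 * starts depending on it.
 */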
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}
void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}
/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}
void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
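/*
 * On VLV/CHV only certain MMIO ranges sit behind the render or media power
 * wells; the range macros below classify a register offset so the read/write
 * paths know which well (if any) must be awake for the access.
 */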
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xC000) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xC000, 0xC800) || \
	 REG_RANGE((reg), 0xF000, 0x10000) || \
	 REG_RANGE((reg), 0x14000, 0x14400) || \
	 REG_RANGE((reg), 0x22000, 0x24000))
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}
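/*
 * With i915.mmio_debug enabled, check the FPGA_DBG unclaimed flag before and
 * after every MMIO access, warn when an access to an unclaimed register is
 * detected, and clear the flag again.
 */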
static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	if (i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
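/*
 * The register accessors below are generated per access width (8/16/32/64)
 * from the __genN_read/__genN_write macro families and bound to the uncore
 * mmio vfuncs by ASSIGN_READ/WRITE_MMIO_VFUNCS in intel_uncore_init().
 */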
#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}
#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	REG_READ_FOOTER; \
}
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine |= FORCEWAKE_RENDER; \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine |= FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER
#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}
#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}
#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}
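/*
 * On gen8 the registers listed below are shadowed by hardware and can be
 * written without grabbing forcewake first.
 */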
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};
static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	unsigned fwengine = 0; \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_WRITE_FOOTER; \
}
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
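/*
 * Pick the forcewake get/put implementation and the MMIO accessors that match
 * the running platform; called once during driver load.
 */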
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	intel_uncore_early_sanitize(dev, false);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen4);
		ASSIGN_READ_MMIO_VFUNCS(gen4);
		break;
	}
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}
#define GEN_RANGE(l, h) GENMASK(h, l)
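/*
 * Registers userspace may read through the I915_REG_READ ioctl, together with
 * their access width and the generations on which they are valid.
 */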
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
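/*
 * Platform specific full GPU reset helpers; intel_gpu_reset() below picks the
 * right one for the running generation.
 */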
static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/* FIXME: i965g/gm need a display save/restore for gpu reset. */

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}
static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}
static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}
int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_GEN4(dev))
		return i965_do_reset(dev);
	else
		return -ENODEV;
}
void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}