/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
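/*
 * For example, on an IBX PCH a hotplug on port B is reported through the
 * hpd_ibx[HPD_PORT_B] trigger bit, i.e. SDE_PORTB_HOTPLUG in the south
 * display IIR; the tables above provide that logical-pin to
 * platform-specific-bit mapping for each platform.
 */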
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
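/*
 * As an illustration of the token pasting above, GEN5_IRQ_RESET(GT)
 * expands into writes of GTIMR, GTIER and GTIIR: the IMR is fully
 * masked, the IER zeroed, and the IIR written with all ones twice,
 * since (per the note above) the IIR can queue up two events.
 */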
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}
/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent concurrent read-modify-write
 * cycles from interfering, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where
 * the lock is held already, this function acquires the lock itself.
 * A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
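/*
 * Worked example (illustrative): with interrupt_mask = 0x6 and
 * enabled_irq_mask = 0x2, bits outside 0x6 keep their previous DEIMR
 * value, bit 1 is unmasked and bit 2 stays masked, because IMR bits
 * are set for *disabled* interrupts:
 *	new_val = (old & ~0x6) | (~0x2 & 0x6) = (old & ~0x6) | 0x4
 * The same two-mask update pattern recurs in the helpers below.
 */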
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	return (mask & ~dev_priv->rps.pm_intr_keep);
}
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}
/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
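/*
 * Illustrative note: in PIPESTAT the enable bits sit 16 bits above
 * their status counterparts, hence the status_mask << 16 above; e.g. a
 * status bit at position 2 is enabled via bit 18. The FIFO underrun and
 * sprite flip-done bits are the exceptions handled explicitly above.
 */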
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
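/*
 * Worked example (illustrative): after the shifts, high1 holds the
 * upper bits and low the bottom 8 bits of the 24-bit frame counter,
 * e.g. high1 = 0x1234 and low = 0x56 compose to 0x123456. Since the
 * hardware counter increments at the start of active, the extra
 * (pixel >= vbl_start) term adds 1 once the pixel counter has passed
 * the (hsync-adjusted) vblank start, emulating a counter that
 * increments at the start of vblank instead.
 */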
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
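/*
 * Illustrative arithmetic for the modulo above: with vtotal = 800 and
 * scanline_offset = 1, a raw readout of 799 is reported as
 * (799 + 1) % 800 = 0, folding the hardware's off-by-one (or
 * off-by-two) counting convention back into the 0..vtotal-1 range.
 */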
static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}
static void notify_ring(struct intel_engine_cs *engine)
{
	smp_store_mb(engine->breadcrumbs.irq_posted, true);
	if (intel_engine_wakeup(engine)) {
		trace_i915_gem_request_notify(engine);
		engine->breadcrumbs.irq_wakeups++;
	}
}
static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}
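/*
 * Illustrative reading of the math above: time scales the elapsed CZ
 * clock by the threshold (a percentage), while c0 scales the combined
 * render + media residency by 100 (or 100 << 8 in high-range mode), so
 * "c0 >= time" is, in effect, "busyness percentage >= threshold"; e.g.
 * with threshold = 90, an interval that was ~95% busy compares true.
 */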
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}
static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		if (intel_engine_has_waiter(engine))
			return true;

	return false;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
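/*
 * Illustrative sequence for the adjustment logic above: consecutive
 * up-threshold events grow the step exponentially (+1, +2, +4, ... or
 * +2, +4, +8, ... on CHV, which needs even encodings), while a client
 * boost, an active waiter or a down-timeout resets last_adj to 0 so
 * the next ramp restarts from the smallest step.
 */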
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}
static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(engine);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		tasklet_schedule(&engine->irq_tasklet);
}
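/*
 * Illustrative example: each gen8 GT IIR packs two engines' interrupt
 * bits, so the callers below dispatch e.g. VCS2 via
 * gen8_cs_irq_handler(&dev_priv->engine[VCS2], gt_iir[1],
 * GEN8_VCS2_IRQ_SHIFT), testing the user-interrupt and context-switch
 * bits shifted into that engine's half of the register.
 */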
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}
static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(&dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(&dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(&dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}
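/*
 * Typical usage (illustrative), mirroring the callers below: zero the
 * masks, optionally accumulate across several trigger registers, then
 * hand the result to intel_hpd_irq_handler():
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 *			   dig_hotplug_reg, hpd_ibx,
 *			   pch_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */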
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
								 pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
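/*
 * Illustrative note on the ring above: the entry count is a power of
 * two, so "(head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1)" wraps the
 * index, and CIRC_SPACE(head, tail, size) (from <linux/circ_buf.h>)
 * reports how many entries can still be written; e.g. with a size of
 * 128, head 5 and tail 6 it returns 0 and the CRC is dropped with an
 * error instead of overwriting an unread entry.
 */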
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}
static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}
static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}

	spin_unlock(&dev_priv->irq_lock);
}
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 master_ctl, iir;
		u32 gt_iir[4] = {};
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);

		gen8_gt_irq_handler(dev_priv, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}
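
/*
 * The handler below follows the read/write-back pattern used throughout
 * this file: reading DIGITAL_PORT_HOTPLUG_CNTRL latches the per-port
 * short/long pulse status, and writing the same value straight back
 * clears those sticky bits before the trigger is decoded into pins.
 */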
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			intel_finish_page_flip_cs(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			intel_finish_page_flip_cs(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			snb_gt_irq_handler(dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev_priv)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
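
/*
 * On gen8+ the single DEIIR of ilk/ivb is split into per-domain IIRs
 * (misc, port, one per pipe, PCH), each gated by its own bit in the
 * master control register. The handler below therefore repeats the same
 * pattern per domain: check the master bit, read the domain IIR, ack it,
 * then dispatch, complaining if the master control lied about a source.
 */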
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			if (iir & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev_priv);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_INFO(dev_priv)->gen >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_BROXTON(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 flip_done, fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		flip_done = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
		else
			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;

		if (flip_done)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 master_ctl;
	u32 gt_iir[4] = {};
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Find, clear, then process each source of interrupt */
	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
	gen8_gt_irq_handler(dev_priv, gt_iir);
	ret |= gen8_de_irq_handler(dev_priv, master_ctl);

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
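
/*
 * The handler above uses the raw I915_READ_FW()/I915_WRITE_FW()
 * accessors for the master IRQ register: it does not need forcewake,
 * and skipping the uncore bookkeeping keeps the hot interrupt path
 * cheap.
 */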
static void i915_error_wake_up(struct drm_i915_private *dev_priv)
{
	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);
}
/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev_priv: i915 device private
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
{
	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);
		intel_prepare_reset(dev_priv);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev_priv);

		intel_finish_reset(dev_priv);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0)
			kobject_uevent_env(kobj,
					   KOBJ_CHANGE, reset_done_event);

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}
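
/*
 * Userspace (e.g. a udev rule collecting error state) thus sees
 * I915_ERROR_UEVENT=1 when the hang is detected, I915_RESET_UEVENT=1
 * while the reset runs, and I915_ERROR_UEVENT=0 only if the reset
 * completed successfully.
 */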
static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
{
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev_priv, instdone);

	if (IS_G4X(dev_priv)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev_priv)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_GEN(dev_priv) < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle a gpu error
 * @dev_priv: i915 device private
 * @engine_mask: mask representing engines that are hung
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
		       const char *fmt, ...)
{
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev_priv, engine_mask, error_msg);
	i915_report_and_clear_eir(dev_priv);

	if (engine_mask) {
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
			  &dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv);
	}

	i915_reset_and_wakeup(dev_priv);
}
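
/*
 * Callers may pass engine_mask == 0 to capture and report state without
 * declaring a hang (see the semaphore/wait kicks in ring_stuck()); only
 * a non-zero mask sets the reset-in-progress flag and wakes waiters.
 */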
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
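
/*
 * The enable/disable_vblank hooks above and below are selected
 * per-platform at init time (in this era from intel_irq_init()) and
 * called by the DRM core vblank code, which hands us the crtc index
 * that we treat as a pipe index.
 */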
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static bool
ring_idle(struct intel_engine_cs *engine, u32 seqno)
{
	return i915_seqno_passed(seqno,
				 READ_ONCE(engine->last_submitted_seqno));
}

static bool
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
	if (INTEL_GEN(engine->i915) >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
				 u64 offset)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *signaller;

	if (INTEL_GEN(dev_priv) >= 8) {
		for_each_engine(signaller, dev_priv) {
			if (engine == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[engine->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_engine(signaller, dev_priv) {
			if (engine == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  engine->id, ipehr, offset);

	return NULL;
}
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
	if (engine->buffer == NULL)
		return NULL;

	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
	if (!ipehr_is_semaphore_wait(engine, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at the batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(engine) & HEAD_ADDR;
	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= engine->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(engine->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
	if (INTEL_GEN(dev_priv) >= 8) {
		offset = ioread32(engine->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(engine->buffer->virtual_start + head + 8);
	}

	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}
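
/*
 * Layout assumed above: the dword after the semaphore wait command
 * holds the value being waited on, and on gen8+ the wait carries a
 * 64-bit GGTT address of the semaphore, low dword at +8 and high dword
 * at +12, which the two ioread32()s are combined to recover.
 */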
static int semaphore_passed(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *signaller;
	u32 seqno;

	engine->hangcheck.deadlock++;

	signaller = semaphore_waits_for(engine, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
		return -1;

	if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		engine->hangcheck.deadlock = 0;
}
static bool subunits_stuck(struct intel_engine_cs *engine)
{
	u32 instdone[I915_NUM_INSTDONE_REG];
	bool stuck;
	int i;

	if (engine->id != RCS)
		return true;

	i915_get_extra_instdone(engine->i915, instdone);

	/* There might be unstable subunit states even when
	 * actual head is not moving. Filter out the unstable ones by
	 * accumulating the undone -> done transitions and only
	 * consider those as progress.
	 */
	stuck = true;
	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
		const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];

		if (tmp != engine->hangcheck.instdone[i])
			stuck = false;

		engine->hangcheck.instdone[i] |= tmp;
	}

	return stuck;
}
static enum intel_ring_hangcheck_action
head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
	if (acthd != engine->hangcheck.acthd) {

		/* Clear subunit states on head movement */
		memset(engine->hangcheck.instdone, 0,
		       sizeof(engine->hangcheck.instdone));

		return HANGCHECK_ACTIVE;
	}

	if (!subunits_stuck(engine))
		return HANGCHECK_ACTIVE;

	return HANGCHECK_HUNG;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *engine, u64 acthd)
{
	struct drm_i915_private *dev_priv = engine->i915;
	enum intel_ring_hangcheck_action ha;
	u32 tmp;

	ha = head_stuck(engine, acthd);
	if (ha != HANGCHECK_HUNG)
		return ha;

	if (IS_GEN2(dev_priv))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(engine);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev_priv, 0,
				  "Kicking stuck wait on %s",
				  engine->name);
		I915_WRITE_CTL(engine, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(engine)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev_priv, 0,
					  "Kicking stuck semaphore on %s",
					  engine->name);
			I915_WRITE_CTL(engine, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
static unsigned long kick_waiters(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);

	if (engine->hangcheck.user_interrupts == irq_count &&
	    !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  engine->name);

		intel_engine_enable_fake_irq(engine);
	}

	return irq_count;
}
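
/*
 * A missed user interrupt shows up above as the breadcrumbs irq counter
 * standing still while a waiter exists; arming the fake irq makes the
 * breadcrumbs code poll the seqno on the waiter's behalf, so a lost
 * interrupt degrades throughput instead of hanging the waiter.
 */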
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and,
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct intel_engine_cs *engine;
	unsigned int hung = 0, stuck = 0;
	int busy_count = 0;
#define BUSY 1
#define KICK 5
#define HUNG 20
#define ACTIVE_DECAY 15

	if (!i915.enable_hangcheck)
		return;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	/* As enabling the GPU requires fairly extensive mmio access,
	 * periodically arm the mmio checker to see if we are triggering
	 * any invalid access.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	for_each_engine(engine, dev_priv) {
		bool busy = intel_engine_has_waiter(engine);
		u64 acthd;
		u32 seqno;
		unsigned user_interrupts;

		semaphore_clear_deadlocks(dev_priv);

		/* We don't strictly need an irq-barrier here, as we are not
		 * serving an interrupt request, be paranoid in case the
		 * barrier has side-effects (such as preventing a broken
		 * cacheline snoop) and so be sure that we can see the seqno
		 * advance. If the seqno should stick, due to a stale
		 * cacheline, we would erroneously declare the GPU hung.
		 */
		if (engine->irq_seqno_barrier)
			engine->irq_seqno_barrier(engine);

		acthd = intel_ring_get_active_head(engine);
		seqno = intel_engine_get_seqno(engine);

		/* Reset stuck interrupts between batch advances */
		user_interrupts = 0;

		if (engine->hangcheck.seqno == seqno) {
			if (ring_idle(engine, seqno)) {
				engine->hangcheck.action = HANGCHECK_IDLE;
				if (busy) {
					/* Safeguard against driver failure */
					user_interrupts = kick_waiters(engine);
					engine->hangcheck.score += BUSY;
				}
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring, if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				engine->hangcheck.action = ring_stuck(engine,
								      acthd);

				switch (engine->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
					break;
				case HANGCHECK_ACTIVE:
					engine->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					engine->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					engine->hangcheck.score += HUNG;
					break;
				}
			}

			if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
				hung |= intel_engine_flag(engine);
				if (engine->hangcheck.action != HANGCHECK_HUNG)
					stuck |= intel_engine_flag(engine);
			}
		} else {
			engine->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (engine->hangcheck.score > 0)
				engine->hangcheck.score -= ACTIVE_DECAY;
			if (engine->hangcheck.score < 0)
				engine->hangcheck.score = 0;

			/* Clear head and subunit states on seqno movement */
			acthd = 0;

			memset(engine->hangcheck.instdone, 0,
			       sizeof(engine->hangcheck.instdone));
		}

		engine->hangcheck.seqno = seqno;
		engine->hangcheck.acthd = acthd;
		engine->hangcheck.user_interrupts = user_interrupts;
		busy_count += busy;
	}

	if (hung) {
		char msg[80];
		int len;

		/* If some rings hung but others were still busy, only
		 * blame the hanging rings in the synopsis.
		 */
		if (stuck != hung)
			hung &= ~stuck;
		len = scnprintf(msg, sizeof(msg),
				"%s on ", stuck == hung ? "No progress" : "Hang");
		for_each_engine_masked(engine, dev_priv, hung)
			len += scnprintf(msg + len, sizeof(msg) - len,
					 "%s, ", engine->name);
		msg[len-2] = '\0';

		return i915_handle_error(dev_priv, hung, msg);
	}

	/* Reset timer in case GPU hangs without another request being added */
	if (busy_count)
		i915_queue_hangcheck(dev_priv);
}
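
/*
 * Score arithmetic above, assuming the HANGCHECK_SCORE_RING_HUNG
 * threshold of 31 from i915_drv.h in this era: a ring diagnosed HUNG
 * gains 20 per tick and trips the threshold on its second tick, a
 * repeatedly kicked ring (+5) needs about seven ticks, and even a
 * plainly busy ring (+1) eventually gets there, which is what keeps a
 * single unbounded batch from running forever. Any seqno advance pulls
 * the score back down by ACTIVE_DECAY.
 */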
static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPE_FIFO_UNDERRUN_STATUS |
			   PIPESTAT_INT_STATUS_MASK);
		dev_priv->pipestat_irq_mask[pipe] = 0;
	}

	GEN5_IRQ_RESET(VLV_);
	dev_priv->irq_mask = ~0;
}
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	WARN_ON(dev_priv->irq_mask != ~0);

	dev_priv->irq_mask = ~enable_mask;

	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}
static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_reset(dev);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);
}
static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}
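
/*
 * Example: on an SPT system with encoders on ports B and C in state
 * HPD_ENABLED, this folds hpd_spt[HPD_PORT_B] | hpd_spt[HPD_PORT_C]
 * into the result, while pins that storm detection has moved to
 * HPD_DISABLED stay out of the mask and thus stay masked off.
 */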
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev_priv);
}
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, hotplug, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
		PORTA_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * On BXT the invert bit has to be set according to the AOB design
	 * of the HPD detection logic, so update it from the VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev))
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_MISC_GSE;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
}
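
/*
 * Note the masked vs enabled split above: de_pipe_enables additionally
 * carries the vblank and FIFO underrun bits, so those sources are
 * enabled in the IER but stay masked in the IMR until someone unmasks
 * them on demand (e.g. bdw_enable_pipe_irq() for vblank).
 */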
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	ret = IRQ_NONE;
	iir = I915_READ16(IIR);
	if (iir == 0)
		goto out;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}
	ret = IRQ_HANDLED;

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev_priv);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}
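
/*
 * Gen3 top-level interrupt handler. IIR is acked and re-read in a loop
 * since new bits can latch while the current set is being serviced;
 * see the MSI note at the bottom of the loop body.
 */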
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
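
/*
 * HWSTAM (written in the uninstall below and in the preinstall hooks)
 * controls which interrupt status bits are forwarded to the hardware
 * status page; writing all ones should mask every event again on
 * teardown.
 */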
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
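
/*
 * Only gen2 has 16 bit interrupt registers; the i915/i965 variants
 * above and below use the full 32 bit I915_WRITE()/I915_READ()
 * accessors for IIR, IMR and IER.
 */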
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}
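
/*
 * Hotplug detection setup shared by the gen3/gen4 paths. Callers are
 * expected to hold dev_priv->irq_lock, which the assert below checks.
 */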
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
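
/*
 * Unlike the gen2/gen3 handlers, the gen4 handler below also services
 * the BSD (video) engine via I915_BSD_USER_INTERRUPT, which
 * i965_irq_postinstall() enables on G4X only.
 */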
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
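
/*
 * In the final clear in the uninstall below, 0x8000ffff selects the
 * PIPESTAT status bits (the low word plus the FIFO underrun status in
 * bit 31) while leaving the already-zeroed enable bits untouched.
 */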
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intr_keep = 0;

	/*
	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
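
/*
 * Rough call-order sketch (the actual call sites live in the driver
 * load and resume code, not in this file):
 *
 *	intel_irq_init(dev_priv);	- vtables and work items
 *	intel_irq_install(dev_priv);	- request_irq() + postinstall
 *	...
 *	intel_irq_uninstall(dev_priv);	- teardown on unload
 */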
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}
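
/*
 * The runtime pm helpers below reuse the per-platform preinstall,
 * postinstall and uninstall hooks picked in intel_irq_init(), so
 * runtime suspend and system suspend share one reset path.
 */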
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}
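
/*
 * Ordering note for intel_runtime_pm_disable_interrupts() above: the
 * irqs_enabled flag is cleared before synchronize_irq(), so a freshly
 * arriving interrupt should bail out via its intel_irqs_enabled()
 * check while synchronize_irq() waits for any handler already in
 * flight.
 */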
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}