/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
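/*
 * These tables are indexed by enum hpd_pin and translate a hotplug pin into
 * the platform-specific enable/status bit for that pin. They are handed to
 * intel_hpd_irq_handler() below as its "hpd" argument, which walks every pin
 * and tests the pin's bit against the triggered hotplug status.
 */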
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)
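/*
 * Illustrative expansion (not an extra code path): GEN5_IRQ_INIT(GT, imr, ier)
 * token-pastes the "GT" prefix onto the register names, i.e. it first checks
 * that GTIIR is zero and then programs GTIMR and GTIER, finishing with a
 * posting read of GTIER so the writes are flushed before the irq is relied on.
 */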
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
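/*
 * Reminder on the IMR convention used by the update helper above: a set bit
 * in GTIMR means "masked", so gen5_enable_gt_irq(dev_priv, mask) clears bits
 * in the cached gt_irq_mask and gen5_disable_gt_irq(dev_priv, mask) sets
 * them before the cache is written back to the hardware register.
 */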
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}
/**
  * bdw_update_pm_irq - update GT interrupt 2
  * @dev_priv: driver private
  * @interrupt_mask: mask of interrupt bits to update
  * @enabled_irq_mask: mask of interrupt bits to enable
  *
  * Copied from the snb function, updated with relevant register offsets
  */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;

	spin_lock_irq(&dev_priv->irq_lock);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irq(&dev_priv->irq_lock);
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (HAS_GMCH_DISPLAY(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev) || IS_GEN9(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return old;
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
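/*
 * PIPESTAT keeps the interrupt enable bits in the high 16 bits and the
 * corresponding status bits in the low 16 bits, hence the "status_mask << 16"
 * default used above and in the callers below. VLV is special only because a
 * few status bits (FIFO underrun, sprite flip done) lack a 1:1 enable bit.
 */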
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
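/*
 * Worked example of the value cooked up above, with vbl_start already
 * converted to a pixel count: high1 = 0x1 and low = 0x34 yield a hardware
 * frame count of (0x1 << 8) | 0x34 = 0x134, and one extra tick is added once
 * the pixel counter passes vbl_start, so the reported counter increments at
 * the start of vblank like the ctg+ frame counter does.
 */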
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
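/*
 * Sign convention of the adjusted position above: *vpos is negative while in
 * vblank (counting up towards 0 at vbl_end) and non-negative during active
 * scanout. E.g. with vbl_start = 768 and vbl_end = vtotal = 771, a raw
 * scanline of 769 is reported as 769 - 771 = -2, two lines before active.
 */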
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}
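/*
 * Note the two-stage hand-off: the hard irq handler only records long/short
 * pulse masks under irq_lock, and this worker dequeues them and calls the
 * ->hpd_pulse() hook outside the lock. A true return from ->hpd_pulse() falls
 * back to the legacy path by re-arming hpd_event_bits and hotplug_work.
 */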
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time)
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);

	return residency;
}
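/*
 * Residency above is max(render, media) C0 time as a percentage of elapsed
 * wall time over the evaluation interval, e.g. 50ms of render C0 within a
 * 100ms EI yields 50. The first invocation merely primes the snapshot and
 * returns the current frequency instead of a percentage.
 */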
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
			(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (INTEL_INFO(dev_priv->dev)->gen >= 8)
		gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
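/*
 * The last_adj bookkeeping above makes the step size adaptive: consecutive
 * up (or down) threshold interrupts double the previous adjustment, while a
 * down timeout or a direction change drops back to the base step, so the
 * frequency converges quickly under a sustained load ramp.
 */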
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static int ilk_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int g4x_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (IS_G4X(dev)) {
				dig_shift = g4x_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = ilk_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}
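/*
 * Storm detection arithmetic: more than HPD_STORM_THRESHOLD (5) interrupts on
 * one pin within a single HPD_STORM_DETECT_PERIOD (1000 ms) window marks the
 * pin HPD_MARK_DISABLED; i915_hotplug_work_func() then demotes the connector
 * to polling, and hotplug_reenable_work re-enables the irq after
 * I915_REENABLE_HOTPLUG_DELAY (two minutes).
 */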
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
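/*
 * The CRC samples above land in a power-of-two ring buffer drained via
 * debugfs; CIRC_SPACE() is the stock linux/circ_buf.h helper, so a full
 * buffer (space < 1) drops the new sample with an error instead of
 * overwriting the unread tail.
 */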
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
void gen8_flip_interrupt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->rps.is_bdw_sw_turbo)
		return;

	if (atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
		mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
			  usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
	} else {
		dev_priv->rps.sw_turbo.flip_timer.expires =
			usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
		add_timer(&dev_priv->rps.sw_turbo.flip_timer);
		atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
	}

	bdw_software_turbo(dev);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
		mask = 0;
		if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
			mask |= PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
		    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
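/*
 * Editor's note: the pipestat handler above reads and acks all PIPESTAT
 * registers under the irq spinlock first, and only then walks the latched
 * copies to do the actual work; keeping the handling outside the lock is
 * what makes calls like intel_check_page_flip() safe here. Non-compiling
 * skeleton of the two-phase shape (read_and_ack_status and handle_status
 * are illustrative names):
 */
#if 0
	u32 latched[I915_MAX_PIPES] = { };

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe)
		latched[pipe] = read_and_ack_status(pipe);	/* phase 1 */
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe)
		handle_status(pipe, latched[pipe]);		/* phase 2 */
#endif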
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}
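/*
 * Editor's note: the POSTING_READ(PORT_HOTPLUG_STAT) above exists because
 * PCI writes are posted; reading any register back from the same device
 * forces the preceding write to land before we go on to clear IIR. The
 * bare-metal shape of the idiom, with a hypothetical mmio mapping:
 */
#if 0
volatile unsigned int *mmio;	/* hypothetical BAR mapping */

static void write_then_flush(unsigned int reg, unsigned int val)
{
	mmio[reg / 4] = val;		/* posted write */
	(void)mmio[reg / 4];		/* read back: forces completion */
}
#endif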
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
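/*
 * Editor's note: both VLV and CHV handlers above follow the same loop:
 * snapshot the IIR-style sources, bail when everything reads zero, ack what
 * was snapshotted, then process the snapshot. A condensed, non-compiling
 * skeleton (read_sources, ack_sources and process are illustrative names):
 */
#if 0
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		u32 pending = read_sources();
		if (!pending)
			break;		/* nothing left: stop looping */

		ack_sources(pending);	/* clear first ... */
		process(pending);	/* ... then handle the snapshot */
		ret = IRQ_HANDLED;
	}
	return ret;
#endif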
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port);
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_ERROR("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_ERROR("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_ERROR("Pipe %c FIFO underrun\n",
					  pipe_name(pipe));
		}

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_ERROR("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_ERROR("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_ERROR("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("Pipe %c FIFO underrun\n",
					  pipe_name(pipe));

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
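/*
 * Editor's note: the SDEIER dance above is worth calling out. South-display
 * interrupts are disabled for the whole handler, so any new PCH event is
 * back-queued by the hardware rather than lost, and restoring SDEIER at the
 * end retriggers the line if SDEIIR still has bits set. Compressed,
 * non-compiling shape of the bracket:
 */
#if 0
	sde_ier = I915_READ(SDEIER);	/* save */
	I915_WRITE(SDEIER, 0);		/* new PCH events back-queue ... */
	POSTING_READ(SDEIER);

	/* ... handle SDEIIR exactly once here ... */

	I915_WRITE(SDEIER, sde_ier);	/* restore: refires if work remains */
	POSTING_READ(SDEIER);
#endif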
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	/* Find, clear, then process each source of interrupt */

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_AUX_CHANNEL_A)
				dp_aux_irq_handler(dev);
			else
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);

			if (pipe_iir & GEN8_PIPE_VBLANK &&
			    intel_pipe_handle_vblank(dev, pipe))
				intel_check_page_flip(dev, pipe);

			if (IS_GEN9(dev))
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

			if (flip_done) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip_plane(dev, pipe);
			}

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
				if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
									  false))
					DRM_ERROR("Pipe %c FIFO underrun\n",
						  pipe_name(pipe));
			}

			if (IS_GEN9(dev))
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

			if (fault_errors)
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
					  pipe_name(pipe),
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
			cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	struct drm_i915_private *dev_priv =
		container_of(error, struct drm_i915_private, gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);
		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}
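/*
 * Editor's note: gpu_error.reset_counter packs a counter and status flags
 * into one atomic_t: I915_RESET_IN_PROGRESS_FLAG marks a pending reset,
 * I915_WEDGED marks an unrecoverable GPU, and the atomic_inc above steps the
 * counter past the "reset pending" state. A standalone C11 model of that
 * encoding; the exact bit layout here is a plausible sketch, not a verbatim
 * copy of the driver's definitions:
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

#define RESET_IN_PROGRESS	0x1		/* low bit: odd == resetting */
#define WEDGED			(1u << 31)	/* sticky: GPU unrecoverable */

static _Atomic unsigned reset_counter;

static bool reset_in_progress(void)
{
	return atomic_load(&reset_counter) & (RESET_IN_PROGRESS | WEDGED);
}

static void reset_completed(void)
{
	/* counter is odd while resetting; +1 makes it even again and
	 * bumps the epoch that waiters compare against */
	atomic_fetch_add(&reset_counter, 1);
}
#endif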
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}
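/*
 * Editor's note: i915_handle_error() forwards printf-style arguments into a
 * fixed buffer with vscnprintf(), which (unlike vsnprintf) returns the
 * number of characters actually written. A minimal standalone equivalent of
 * that wrapper shape, using the userspace vsnprintf as a stand-in:
 */
#if 0
#include <stdarg.h>
#include <stdio.h>

static void report_error(const char *fmt, ...)
{
	char msg[80];
	va_list args;

	va_start(args, fmt);
	vsnprintf(msg, sizeof(msg), fmt, args);	/* userspace stand-in */
	va_end(args);

	fprintf(stderr, "gpu error: %s\n", msg);
}
#endif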
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_engine_cs *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}

	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
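/*
 * Editor's note: the backwards scan above relies on the ring size being a
 * power of two, so "head &= size - 1" wraps negative offsets for free when
 * head walks off the start of the buffer. Self-contained illustration:
 */
#if 0
#include <assert.h>

static unsigned wrap(unsigned head, unsigned size)
{
	return head & (size - 1);	/* size must be a power of two */
}

static void wrap_demo(void)
{
	unsigned size = 4096;

	/* stepping back 4 bytes from offset 0 lands at the buffer end */
	assert(wrap(0 - 4, size) == size - 4);
}
#endif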
static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track per ring seqno progress and
 * if there is no progress, hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. On stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring, if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true, "Ring hung");

	if (busy_count)
		/* Reset timer in case chip hangs without another request
		 * being added */
		i915_queue_hangcheck(dev);
}
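/*
 * Editor's note: hangcheck is a score machine: BUSY/KICK/HUNG add weight
 * while a ring makes no seqno progress, progress decays the score by one,
 * and crossing HANGCHECK_SCORE_RING_HUNG declares the ring hung. A toy,
 * self-contained version of that policy (SCORE_RING_HUNG here is an
 * illustrative threshold, not necessarily the driver's exact value):
 */
#if 0
#define BUSY	1
#define KICK	5
#define HUNG	20
#define SCORE_RING_HUNG	31

static int score;

static int hangcheck_tick(int made_progress, int penalty)
{
	if (made_progress) {
		if (score > 0)
			score--;	/* decay: forgive slow batches */
	} else {
		score += penalty;	/* BUSY, KICK or HUNG */
	}
	return score >= SCORE_RING_HUNG;	/* time to reset? */
}
#endif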
void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (!i915.enable_hangcheck)
		return;

	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	gen5_gt_irq_reset(dev);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	ibx_irq_reset(dev);
}

void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
			  ~dev_priv->de_irq_mask[PIPE_B]);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
			  ~dev_priv->de_irq_mask[PIPE_C]);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	POSTING_READ(GEN8_PCU_IIR);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	I915_WRITE(VLV_IIR, 0xffffffff);
	POSTING_READ(VLV_IIR);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
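/*
 * Editor's note: ibx_hpd_irq_setup() builds its unmask value by indexing a
 * per-pin table (hpd_ibx/hpd_cpt) with each enabled encoder's hpd_pin --
 * the same table-driven pattern as the hpd_* arrays at the top of this
 * file. Distilled, self-contained version (the pin list and the table
 * contents are illustrative):
 */
#if 0
static const unsigned pin_to_bit[] = {
	[0] = 1u << 0,
	[1] = 1u << 4,
	[2] = 1u << 8,
};

static unsigned build_enabled_mask(const int *pins, int n)
{
	unsigned mask = 0;
	int i;

	for (i = 0; i < n; i++)
		mask |= pin_to_bit[pins[i]];	/* accumulate enabled pins */
	return mask;
}
#endif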
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= dev_priv->pm_rps_events;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
					       PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	POSTING_READ(VLV_IER);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
						PIPE_GMBUS_INTERRUPT_STATUS);
	i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;
	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	POSTING_READ(VLV_IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	int pipe;

	if (IS_GEN9(dev_priv))
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
		PIPE_CRC_DONE_INTERRUPT_STATUS;
	int pipe;

	/*
	 * Leave vblank interrupts masked initially. enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = ~enable_mask;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	dev_priv->irq_mask = 0;

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

#define GEN8_IRQ_FINI_NDX(type, which)				\
do {								\
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff);	\
	I915_WRITE(GEN8_##type##_IER(which), 0);		\
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);	\
	POSTING_READ(GEN8_##type##_IIR(which));			\
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);	\
} while (0)

#define GEN8_IRQ_FINI(type)				\
do {							\
	I915_WRITE(GEN8_##type##_IMR, 0xffffffff);	\
	I915_WRITE(GEN8_##type##_IER, 0);		\
	I915_WRITE(GEN8_##type##_IIR, 0xffffffff);	\
	POSTING_READ(GEN8_##type##_IIR);		\
	I915_WRITE(GEN8_##type##_IIR, 0xffffffff);	\
} while (0)

	GEN8_IRQ_FINI_NDX(GT, 0);
	GEN8_IRQ_FINI_NDX(GT, 1);
	GEN8_IRQ_FINI_NDX(GT, 2);
	GEN8_IRQ_FINI_NDX(GT, 3);

	GEN8_IRQ_FINI(PCU);

#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	I915_WRITE(VLV_IIR, 0xffffffff);
	POSTING_READ(VLV_IIR);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
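
/*
 * Gen2 exposes 16-bit interrupt registers, hence the I915_READ16/
 * I915_WRITE16 accessors below.  The handler follows the usual pattern
 * in this file: snapshot IIR, ack everything except the flip-pending
 * bits (those stay set until the flip completes), then loop while new
 * bits have latched in the meantime.
 */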
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
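
/*
 * Editor's note on the 0x8000ffff mask seen in the handlers (an
 * assumption from the register layout, not stated here): PIPESTAT keeps
 * enable bits in its high half and write-one-to-clear status bits in
 * bits 15:0 plus bit 31 (FIFO underrun), so writing back the value just
 * read clears pending status without disturbing the enables.
 */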
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
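
/*
 * Editor's sketch of the loop shape shared by the legacy handlers in
 * this file (illustrative only):
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		ack iir & ~flip_mask;           // leave flips pending
 *		new_iir = I915_READ(IIR);       // flush, pick up new bits
 *		handle events from the old iir snapshot;
 *		iir = new_iir;
 *	} while (iir & ~flip_mask);
 *
 * Looping until IIR reads back clear is what keeps MSI alive: MSI only
 * fires on IIR's zero-to-nonzero transition, so returning with bits
 * still set would silence the interrupt for good.
 */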
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
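
/*
 * i915_hpd_irq_setup() below only asserts that dev_priv->irq_lock is
 * held; it never takes it.  Both callers in this file --
 * intel_hpd_init() and intel_hpd_irq_reenable_work() -- wrap the call
 * in spin_lock_irq()/spin_unlock_irq().
 */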
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later. So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
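
/*
 * Unlike the gen3 handler, i965_irq_handler() below also services the
 * BSD (video) ring via I915_BSD_USER_INTERRUPT -- enabled only on G4X
 * in i965_irq_postinstall() -- and the GMBUS interrupt, which is routed
 * through pipe A's PIPESTAT.
 */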
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}
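
/*
 * Editor's note (assumption about the wider driver, not shown in this
 * excerpt): pins end up in HPD_DISABLED via the hotplug interrupt-storm
 * detection elsewhere in this file; the delayed work above runs after a
 * cool-down to flip them back to HPD_ENABLED and reprogram the hotplug
 * registers.
 */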
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	/* Haven't installed the IRQ handler yet */
	dev_priv->pm._irqs_disabled = true;

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev))
		dev->vblank_disable_immediate = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
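
/*
 * Editor's sketch of the expected driver-load ordering (an assumption
 * based on the i915 load path of this era, not part of this file):
 *
 *	intel_irq_init(dev);                    // vtables, work items
 *	drm_irq_install(dev, dev->pdev->irq);   // preinstall + request_irq
 *	intel_hpd_init(dev);                    // below: arm hotplug
 */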
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled &&
		    I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}
/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev->driver->irq_uninstall(dev);
	dev_priv->pm._irqs_disabled = true;
}
/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->pm._irqs_disabled = false;
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}
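
/*
 * Editor's note: the two helpers above are meant to be paired around a
 * runtime-PM cycle (sketch, assuming the runtime suspend/resume
 * callbacks of this era):
 *
 *	intel_runtime_pm_disable_interrupts(dev);  // before powering down
 *	... device runtime-suspended, later resumed ...
 *	intel_runtime_pm_restore_interrupts(dev);  // after powering up
 */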