/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};
static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};
static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)
#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)
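
/*
 * Illustrative note (not from the original source): the reset/init pair is
 * meant to bracket an interrupt bank's lifetime - e.g. GEN5_IRQ_RESET(GT) at
 * preinstall/uninstall time and GEN5_IRQ_INIT(GT, imr, ier) at postinstall -
 * where the GT prefix expands to GTIMR/GTIER/GTIIR via token pasting.
 */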
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
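
/*
 * Worked example (added for clarity): interrupt_mask selects which bits
 * change and enabled_irq_mask gives their new state. Since a set bit in
 * GTIMR masks the interrupt, ilk_update_gt_irq(dev_priv, mask, mask) clears
 * the bits (unmask/enable) and ilk_update_gt_irq(dev_priv, mask, 0) sets
 * them (mask/disable) - exactly what the two wrappers below do.
 */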
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}
/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}
void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}
void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)
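
/*
 * Note (added for clarity): like the other IMR registers, a bit set in
 * SDEIMR masks the corresponding south display interrupt, so "enable"
 * clears bits and "disable" sets them in the update above.
 */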
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (HAS_GMCH_DISPLAY(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return old;
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
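
/*
 * Note (added for clarity): PIPESTAT packs enable bits in the high 16 bits
 * and status bits in the low 16, so the default mapping is simply
 * enable_mask = status_mask << 16; the sprite flip-done and FIFO underrun
 * bits handled above are the VLV exceptions to that rule.
 */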
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
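
/*
 * Worked example (added for clarity): vbl_start above is converted to a
 * pixel count (vblank_start * htotal, minus the tail of the last active
 * line up to hsync). The hardware frame counter only increments at the
 * start of active, so if the pixel counter has already passed vbl_start,
 * (pixel >= vbl_start) adds 1 and the cooked value reports the vblank that
 * has just begun.
 */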
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	unsigned long irqflags;
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		schedule_work(&dev_priv->hotplug_work);
	}
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
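/*
 * Note (added for clarity): 2*60*1000 ms is two minutes - the delay before
 * the re-enable work below re-arms hotplug detection after a storm.
 */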
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}
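
/*
 * Worked example (added for clarity): after the unit conversions above the
 * function effectively returns
 *   residency = max(elapsed_render, elapsed_media) * 100 / elapsed_time
 * i.e. the busier of the render/media power wells as a percentage of the
 * elapsed EI window, which the caller compares against the up/down
 * thresholds.
 */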
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (INTEL_INFO(dev_priv->dev)->gen >= 8)
		gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
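
/*
 * Note (added for clarity): a pin counts as storming when more than
 * HPD_STORM_THRESHOLD interrupts arrive within one HPD_STORM_DETECT_PERIOD
 * (1000 ms) window; intel_hpd_irq_handler() below then marks the pin
 * HPD_MARK_DISABLED and i915_hotplug_work_func() falls back to polling.
 */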
static int ilk_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int g4x_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (IS_G4X(dev)) {
				dig_shift = g4x_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = ilk_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}
static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
void gen8_flip_interrupt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->rps.is_bdw_sw_turbo)
		return;

	if (atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
		mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
			  usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
	} else {
		dev_priv->rps.sw_turbo.flip_timer.expires =
			usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
		add_timer(&dev_priv->rps.sw_turbo.flip_timer);
		atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
	}

	bdw_software_turbo(dev);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
        if (!drm_handle_vblank(dev, pipe))
                return false;

        return true;
}
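/*
 * Decode and ack the per-pipe PIPESTAT events (vblank, flip done, CRC
 * done, FIFO underrun, GMBUS) for VLV/CHV. The status registers are read
 * and cleared under irq_lock first; the collected bits are then processed
 * outside the lock.
 */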
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pipe_stats[I915_MAX_PIPES] = { };
        int pipe;

        spin_lock(&dev_priv->irq_lock);
        for_each_pipe(dev_priv, pipe) {
                int reg;
                u32 mask, iir_bit = 0;

                /*
                 * PIPESTAT bits get signalled even when the interrupt is
                 * disabled with the mask bits, and some of the status bits do
                 * not generate interrupts at all (like the underrun bit). Hence
                 * we need to be careful that we only handle what we want to
                 * handle.
                 */
                mask = 0;
                if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
                        mask |= PIPE_FIFO_UNDERRUN_STATUS;

                switch (pipe) {
                case PIPE_A:
                        iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
                        break;
                case PIPE_B:
                        iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
                        break;
                case PIPE_C:
                        iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
                        break;
                }
                if (iir & iir_bit)
                        mask |= dev_priv->pipestat_irq_mask[pipe];

                if (!mask)
                        continue;

                reg = PIPESTAT(pipe);
                mask |= PIPESTAT_INT_ENABLE_MASK;
                pipe_stats[pipe] = I915_READ(reg) & mask;

                /*
                 * Clear the PIPE*STAT regs before the IIR
                 */
                if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
                                        PIPESTAT_INT_STATUS_MASK))
                        I915_WRITE(reg, pipe_stats[pipe]);
        }
        spin_unlock(&dev_priv->irq_lock);

        for_each_pipe(dev_priv, pipe) {
                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
                    intel_pipe_handle_vblank(dev, pipe))
                        intel_check_page_flip(dev, pipe);

                if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
                        intel_prepare_page_flip(dev, pipe);
                        intel_finish_page_flip(dev, pipe);
                }

                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                        i9xx_pipe_crc_irq_handler(dev, pipe);

                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
                    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
                        DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
        }

        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                gmbus_irq_handler(dev);
}
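/*
 * Read and clear PORT_HOTPLUG_STAT, then fan the triggered bits out to the
 * hotplug machinery using the G4X or i915 status table as appropriate.
 */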
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

        if (hotplug_status) {
                I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                /*
                 * Make sure hotplug status is cleared before we clear IIR, or else we
                 * may miss hotplug events.
                 */
                POSTING_READ(PORT_HOTPLUG_STAT);

                if (IS_G4X(dev)) {
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

                        intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
                } else {
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

                        intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
                }

                if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
                    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
                        dp_aux_irq_handler(dev);
        }
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;

        while (true) {
                /* Find, clear, then process each source of interrupt */

                gt_iir = I915_READ(GTIIR);
                if (gt_iir)
                        I915_WRITE(GTIIR, gt_iir);

                pm_iir = I915_READ(GEN6_PMIIR);
                if (pm_iir)
                        I915_WRITE(GEN6_PMIIR, pm_iir);

                iir = I915_READ(VLV_IIR);
                if (iir) {
                        /* Consume port before clearing IIR or we'll miss events */
                        if (iir & I915_DISPLAY_PORT_INTERRUPT)
                                i9xx_hpd_irq_handler(dev);
                        I915_WRITE(VLV_IIR, iir);
                }

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        goto out;

                ret = IRQ_HANDLED;

                if (gt_iir)
                        snb_gt_irq_handler(dev, dev_priv, gt_iir);
                if (pm_iir)
                        gen6_rps_irq_handler(dev_priv, pm_iir);
                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                valleyview_pipestat_irq_handler(dev, iir);
        }

out:
        return ret;
}
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 master_ctl, iir;
        irqreturn_t ret = IRQ_NONE;

        for (;;) {
                master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                iir = I915_READ(VLV_IIR);

                if (master_ctl == 0 && iir == 0)
                        break;

                ret = IRQ_HANDLED;

                I915_WRITE(GEN8_MASTER_IRQ, 0);

                /* Find, clear, then process each source of interrupt */

                if (iir) {
                        /* Consume port before clearing IIR or we'll miss events */
                        if (iir & I915_DISPLAY_PORT_INTERRUPT)
                                i9xx_hpd_irq_handler(dev);
                        I915_WRITE(VLV_IIR, iir);
                }

                gen8_gt_irq_handler(dev, dev_priv, master_ctl);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                valleyview_pipestat_irq_handler(dev, iir);

                I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
                POSTING_READ(GEN8_MASTER_IRQ);
        }

        return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
        u32 dig_hotplug_reg;

        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

        intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

        if (pch_iir & SDE_AUDIO_POWER_MASK) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
                               SDE_AUDIO_POWER_SHIFT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 port);
        }

        if (pch_iir & SDE_AUX_MASK)
                dp_aux_irq_handler(dev);

        if (pch_iir & SDE_GMBUS)
                gmbus_irq_handler(dev);

        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

        if (pch_iir & SDE_AUDIO_TRANS_MASK)
                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

        if (pch_iir & SDE_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (pch_iir & SDE_FDI_MASK)
                for_each_pipe(dev_priv, pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_ERROR("PCH transcoder A FIFO underrun\n");

        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_ERROR("PCH transcoder B FIFO underrun\n");
}
static void ivb_err_int_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 err_int = I915_READ(GEN7_ERR_INT);
        enum pipe pipe;

        if (err_int & ERR_INT_POISON)
                DRM_ERROR("Poison interrupt\n");

        for_each_pipe(dev_priv, pipe) {
                if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
                                                                  false))
                                DRM_ERROR("Pipe %c FIFO underrun\n",
                                          pipe_name(pipe));
                }

                if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
                        if (IS_IVYBRIDGE(dev))
                                ivb_pipe_crc_irq_handler(dev, pipe);
                        else
                                hsw_pipe_crc_irq_handler(dev, pipe);
                }
        }

        I915_WRITE(GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 serr_int = I915_READ(SERR_INT);

        if (serr_int & SERR_INT_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_ERROR("PCH transcoder A FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_ERROR("PCH transcoder B FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
                                                          false))
                        DRM_ERROR("PCH transcoder C FIFO underrun\n");

        I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
        u32 dig_hotplug_reg;

        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

        intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
                               SDE_AUDIO_POWER_SHIFT_CPT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
                                 port_name(port));
        }

        if (pch_iir & SDE_AUX_MASK_CPT)
                dp_aux_irq_handler(dev);

        if (pch_iir & SDE_GMBUS_CPT)
                gmbus_irq_handler(dev);

        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

        if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
                DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

        if (pch_iir & SDE_FDI_MASK_CPT)
                for_each_pipe(dev_priv, pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & SDE_ERROR_CPT)
                cpt_serr_int_handler(dev);
}
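/*
 * Main display interrupt dispatch for ILK/SNB: AUX, opregion/ASLE and
 * poison events, the per-pipe vblank/underrun/CRC/flip-done bits, and any
 * chained south display (PCH) interrupts.
 */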
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;

        if (de_iir & DE_AUX_CHANNEL_A)
                dp_aux_irq_handler(dev);

        if (de_iir & DE_GSE)
                intel_opregion_asle_intr(dev);

        if (de_iir & DE_POISON)
                DRM_ERROR("Poison interrupt\n");

        for_each_pipe(dev_priv, pipe) {
                if (de_iir & DE_PIPE_VBLANK(pipe) &&
                    intel_pipe_handle_vblank(dev, pipe))
                        intel_check_page_flip(dev, pipe);

                if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
                                DRM_ERROR("Pipe %c FIFO underrun\n",
                                          pipe_name(pipe));

                if (de_iir & DE_PIPE_CRC_DONE(pipe))
                        i9xx_pipe_crc_irq_handler(dev, pipe);

                /* plane/pipes map 1:1 on ilk+ */
                if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
                        intel_prepare_page_flip(dev, pipe);
                        intel_finish_page_flip_plane(dev, pipe);
                }
        }

        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT) {
                u32 pch_iir = I915_READ(SDEIIR);

                if (HAS_PCH_CPT(dev))
                        cpt_irq_handler(dev, pch_iir);
                else
                        ibx_irq_handler(dev, pch_iir);

                /* should clear PCH hotplug event before clear CPU irq */
                I915_WRITE(SDEIIR, pch_iir);
        }

        if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
                ironlake_rps_change_irq_handler(dev);
}
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;

        if (de_iir & DE_ERR_INT_IVB)
                ivb_err_int_handler(dev);

        if (de_iir & DE_AUX_CHANNEL_A_IVB)
                dp_aux_irq_handler(dev);

        if (de_iir & DE_GSE_IVB)
                intel_opregion_asle_intr(dev);

        for_each_pipe(dev_priv, pipe) {
                if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
                    intel_pipe_handle_vblank(dev, pipe))
                        intel_check_page_flip(dev, pipe);

                /* plane/pipes map 1:1 on ilk+ */
                if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
                        intel_prepare_page_flip(dev, pipe);
                        intel_finish_page_flip_plane(dev, pipe);
                }
        }

        /* check event from PCH */
        if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
                u32 pch_iir = I915_READ(SDEIIR);

                cpt_irq_handler(dev, pch_iir);

                /* clear PCH hotplug event before clear CPU irq */
                I915_WRITE(SDEIIR, pch_iir);
        }
}
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;

        /* We get interrupts on unclaimed registers, so check for this before we
         * do any I915_{READ,WRITE}. */
        intel_uncore_check_errors(dev);

        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);

        /* Disable south interrupts. We'll only write to SDEIIR once, so further
         * interrupts will be stored on its back queue, and then we'll be
         * able to process them after we restore SDEIER (as soon as we restore
         * it, we'll get an interrupt if SDEIIR still has something to process
         * due to its back queue). */
        if (!HAS_PCH_NOP(dev)) {
                sde_ier = I915_READ(SDEIER);
                I915_WRITE(SDEIER, 0);
                POSTING_READ(SDEIER);
        }

        /* Find, clear, then process each source of interrupt */

        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                I915_WRITE(GTIIR, gt_iir);
                ret = IRQ_HANDLED;
                if (INTEL_INFO(dev)->gen >= 6)
                        snb_gt_irq_handler(dev, dev_priv, gt_iir);
                else
                        ilk_gt_irq_handler(dev, dev_priv, gt_iir);
        }

        de_iir = I915_READ(DEIIR);
        if (de_iir) {
                I915_WRITE(DEIIR, de_iir);
                ret = IRQ_HANDLED;
                if (INTEL_INFO(dev)->gen >= 7)
                        ivb_display_irq_handler(dev, de_iir);
                else
                        ilk_display_irq_handler(dev, de_iir);
        }

        if (INTEL_INFO(dev)->gen >= 6) {
                u32 pm_iir = I915_READ(GEN6_PMIIR);
                if (pm_iir) {
                        I915_WRITE(GEN6_PMIIR, pm_iir);
                        ret = IRQ_HANDLED;
                        gen6_rps_irq_handler(dev_priv, pm_iir);
                }
        }

        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
        if (!HAS_PCH_NOP(dev)) {
                I915_WRITE(SDEIER, sde_ier);
                POSTING_READ(SDEIER);
        }

        return ret;
}
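/*
 * Gen8 uses a hierarchical master interrupt: GEN8_MASTER_IRQ reports which
 * of the GT, DE pipe/port/misc and PCH domains are pending, and only those
 * per-domain IIRs get read and cleared. The master control is disabled for
 * the duration of the handler and re-enabled on the way out.
 */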
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 master_ctl;
        irqreturn_t ret = IRQ_NONE;
        uint32_t tmp = 0;
        enum pipe pipe;

        master_ctl = I915_READ(GEN8_MASTER_IRQ);
        master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
        if (!master_ctl)
                return IRQ_NONE;

        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);

        /* Find, clear, then process each source of interrupt */

        ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

        if (master_ctl & GEN8_DE_MISC_IRQ) {
                tmp = I915_READ(GEN8_DE_MISC_IIR);
                if (tmp) {
                        I915_WRITE(GEN8_DE_MISC_IIR, tmp);
                        ret = IRQ_HANDLED;
                        if (tmp & GEN8_DE_MISC_GSE)
                                intel_opregion_asle_intr(dev);
                        else
                                DRM_ERROR("Unexpected DE Misc interrupt\n");
                } else
                        DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
        }

        if (master_ctl & GEN8_DE_PORT_IRQ) {
                tmp = I915_READ(GEN8_DE_PORT_IIR);
                if (tmp) {
                        I915_WRITE(GEN8_DE_PORT_IIR, tmp);
                        ret = IRQ_HANDLED;
                        if (tmp & GEN8_AUX_CHANNEL_A)
                                dp_aux_irq_handler(dev);
                        else
                                DRM_ERROR("Unexpected DE Port interrupt\n");
                } else
                        DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
        }

        for_each_pipe(dev_priv, pipe) {
                uint32_t pipe_iir;

                if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
                        continue;

                pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
                if (pipe_iir) {
                        ret = IRQ_HANDLED;
                        I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
                        if (pipe_iir & GEN8_PIPE_VBLANK &&
                            intel_pipe_handle_vblank(dev, pipe))
                                intel_check_page_flip(dev, pipe);

                        if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip_plane(dev, pipe);
                        }

                        if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
                                hsw_pipe_crc_irq_handler(dev, pipe);

                        if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
                                if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
                                                                          false))
                                        DRM_ERROR("Pipe %c FIFO underrun\n",
                                                  pipe_name(pipe));
                        }

                        if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
                                DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
                                          pipe_name(pipe),
                                          pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
                        }
                } else
                        DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
        }

        if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
                /*
                 * FIXME(BDW): Assume for now that the new interrupt handling
                 * scheme also closed the SDE interrupt handling race we've seen
                 * on older pch-split platforms. But this needs testing.
                 */
                u32 pch_iir = I915_READ(SDEIIR);
                if (pch_iir) {
                        I915_WRITE(SDEIIR, pch_iir);
                        ret = IRQ_HANDLED;
                        cpt_irq_handler(dev, pch_iir);
                } else
                        DRM_ERROR("The master control interrupt lied (SDE)!\n");
        }

        I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
        POSTING_READ(GEN8_MASTER_IRQ);

        return ret;
}
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
                               bool reset_completed)
{
        struct intel_engine_cs *ring;
        int i;

        /*
         * Notify all waiters for GPU completion events that reset state has
         * been changed, and that they need to restart their wait after
         * checking for potential errors (and bail out to drop locks if there is
         * a gpu reset pending so that i915_error_work_func can acquire them).
         */

        /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
        for_each_ring(ring, dev_priv, i)
                wake_up_all(&ring->irq_queue);

        /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
        wake_up_all(&dev_priv->pending_flip_queue);

        /*
         * Signal tasks blocked in i915_gem_wait_for_error that the pending
         * reset state is cleared.
         */
        if (reset_completed)
                wake_up_all(&dev_priv->gpu_error.reset_queue);
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
        struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
                                                    work);
        struct drm_i915_private *dev_priv =
                container_of(error, struct drm_i915_private, gpu_error);
        struct drm_device *dev = dev_priv->dev;
        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
        int ret;

        kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

        /*
         * Note that there's only one work item which does gpu resets, so we
         * need not worry about concurrent gpu resets potentially incrementing
         * error->reset_counter twice. We only need to take care of another
         * racing irq/hangcheck declaring the gpu dead for a second time. A
         * quick check for that is good enough: schedule_work ensures the
         * correct ordering between hang detection and this work item, and since
         * the reset in-progress bit is only ever set by code outside of this
         * work we don't need to worry about any other races.
         */
        if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
                DRM_DEBUG_DRIVER("resetting chip\n");
                kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
                                   reset_event);

                /*
                 * In most cases it's guaranteed that we get here with an RPM
                 * reference held, for example because there is a pending GPU
                 * request that won't finish until the reset is done. This
                 * isn't the case at least when we get here by doing a
                 * simulated reset via debugfs, so get an RPM reference.
                 */
                intel_runtime_pm_get(dev_priv);
                /*
                 * All state reset _must_ be completed before we update the
                 * reset counter, for otherwise waiters might miss the reset
                 * pending state and not properly drop locks, resulting in
                 * deadlocks with the reset work.
                 */
                ret = i915_reset(dev);

                intel_display_handle_reset(dev);

                intel_runtime_pm_put(dev_priv);

                if (ret == 0) {
                        /*
                         * After all the gem state is reset, increment the reset
                         * counter and wake up everyone waiting for the reset to
                         * complete.
                         *
                         * Since unlock operations are a one-sided barrier only,
                         * we need to insert a barrier here to order any seqno
                         * updates before
                         * the counter increment.
                         */
                        smp_mb__before_atomic();
                        atomic_inc(&dev_priv->gpu_error.reset_counter);

                        kobject_uevent_env(&dev->primary->kdev->kobj,
                                           KOBJ_CHANGE, reset_done_event);
                } else {
                        atomic_set_mask(I915_WEDGED, &error->reset_counter);
                }

                /*
                 * Note: The wake_up also serves as a memory barrier so that
                 * waiters see the updated value of the reset counter atomic_t.
                 */
                i915_error_wake_up(dev_priv, true);
        }
}
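/*
 * Dump whatever EIR reports (page table, memory refresh and instruction
 * errors, plus the per-chipset extras) to the log and ack it. Bits that
 * refuse to clear are masked via EMR so they cannot storm.
 */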
static void i915_report_and_clear_eir(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t instdone[I915_NUM_INSTDONE_REG];
        u32 eir = I915_READ(EIR);
        int pipe, i;

        if (!eir)
                return;

        pr_err("render error detected, EIR: 0x%08x\n", eir);

        i915_get_extra_instdone(dev, instdone);

        if (IS_G4X(dev)) {
                if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
                        u32 ipeir = I915_READ(IPEIR_I965);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
                        for (i = 0; i < ARRAY_SIZE(instdone); i++)
                                pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
                        I915_WRITE(IPEIR_I965, ipeir);
                        POSTING_READ(IPEIR_I965);
                }
                if (eir & GM45_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        pr_err("page table error\n");
                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
                        I915_WRITE(PGTBL_ER, pgtbl_err);
                        POSTING_READ(PGTBL_ER);
                }
        }

        if (!IS_GEN2(dev)) {
                if (eir & I915_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        pr_err("page table error\n");
                        pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
                        I915_WRITE(PGTBL_ER, pgtbl_err);
                        POSTING_READ(PGTBL_ER);
                }
        }

        if (eir & I915_ERROR_MEMORY_REFRESH) {
                pr_err("memory refresh error:\n");
                for_each_pipe(dev_priv, pipe)
                        pr_err("pipe %c stat: 0x%08x\n",
                               pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
                /* pipestat has already been acked */
        }
        if (eir & I915_ERROR_INSTRUCTION) {
                pr_err("instruction error\n");
                pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
                for (i = 0; i < ARRAY_SIZE(instdone); i++)
                        pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
                if (INTEL_INFO(dev)->gen < 4) {
                        u32 ipeir = I915_READ(IPEIR);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
                        I915_WRITE(IPEIR, ipeir);
                        POSTING_READ(IPEIR);
                } else {
                        u32 ipeir = I915_READ(IPEIR_I965);

                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
                        pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
                        pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
                        pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
                        I915_WRITE(IPEIR_I965, ipeir);
                        POSTING_READ(IPEIR_I965);
                }
        }

        I915_WRITE(EIR, eir);
        POSTING_READ(EIR);
        eir = I915_READ(EIR);
        if (eir) {
                /*
                 * some errors might have become stuck,
                 * mask them.
                 */
                DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
                I915_WRITE(EMR, I915_READ(EMR) | eir);
                I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
        }
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
                       const char *fmt, ...)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        va_list args;
        char error_msg[80];

        va_start(args, fmt);
        vscnprintf(error_msg, sizeof(error_msg), fmt, args);
        va_end(args);

        i915_capture_error_state(dev, wedged, error_msg);
        i915_report_and_clear_eir(dev);

        if (wedged) {
                atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
                                &dev_priv->gpu_error.reset_counter);

                /*
                 * Wakeup waiting processes so that the reset work function
                 * i915_error_work_func doesn't deadlock trying to grab various
                 * locks. By bumping the reset counter first, the woken
                 * processes will see a reset in progress and back off,
                 * releasing their locks and then wait for the reset completion.
                 * We must do this for _all_ gpu waiters that might hold locks
                 * that the reset work needs to acquire.
                 *
                 * Note: The wake_up serves as the required memory barrier to
                 * ensure that the waiters see the updated value of the reset
                 * counter atomic_t.
                 */
                i915_error_wake_up(dev_priv, false);
        }

        /*
         * Our reset work can grab modeset locks (since it needs to reset the
         * state of outstanding pageflips). Hence it must not be run on our own
         * dev-priv->wq work queue for otherwise the flush_work in the pageflip
         * code will deadlock.
         */
        schedule_work(&dev_priv->gpu_error.work);
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_START_VBLANK_INTERRUPT_STATUS);
        else
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
                                                     DE_PIPE_VBLANK(pipe);

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        ironlake_enable_display_irq(dev_priv, bit);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_enable_pipestat(dev_priv, pipe,
                             PIPE_START_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
        I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
        POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, pipe,
                              PIPE_VBLANK_INTERRUPT_STATUS |
                              PIPE_START_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
                                                     DE_PIPE_VBLANK(pipe);

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        ironlake_disable_display_irq(dev_priv, bit);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, pipe,
                              PIPE_START_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (!i915_pipe_enabled(dev, pipe))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
        I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
        POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
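/*
 * Hangcheck helpers: track per-ring seqno/ACTHD progress and follow
 * semaphore waits back to the signalling ring so a stuck waiter isn't
 * blamed for a hang caused by another engine.
 */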
static u32
ring_last_seqno(struct intel_engine_cs *ring)
{
        return list_entry(ring->request_list.prev,
                          struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
        return (list_empty(&ring->request_list) ||
                i915_seqno_passed(seqno, ring_last_seqno(ring)));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
        if (INTEL_INFO(dev)->gen >= 8) {
                return (ipehr >> 23) == 0x1c;
        } else {
                ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
                return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
                                 MI_SEMAPHORE_REGISTER);
        }
}
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
        int i;

        if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
                for_each_ring(signaller, dev_priv, i) {
                        if (ring == signaller)
                                continue;

                        if (offset == signaller->semaphore.signal_ggtt[ring->id])
                                return signaller;
                }
        } else {
                u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

                for_each_ring(signaller, dev_priv, i) {
                        if (ring == signaller)
                                continue;

                        if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
                                return signaller;
                }
        }

        DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
                  ring->id, ipehr, offset);

        return NULL;
}
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        u32 cmd, ipehr, head;
        u64 offset = 0;
        int i, backwards;

        ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
        if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
                return NULL;

        /*
         * HEAD is likely pointing to the dword after the actual command,
         * so scan backwards until we find the MBOX. But limit it to just 3
         * or 4 dwords depending on the semaphore wait command size.
         * Note that we don't care about ACTHD here since that might
         * point at a batch, and semaphores are always emitted into the
         * ringbuffer itself.
         */
        head = I915_READ_HEAD(ring) & HEAD_ADDR;
        backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

        for (i = backwards; i; --i) {
                /*
                 * Be paranoid and presume the hw has gone off into the wild -
                 * our ring is smaller than what the hardware (and hence
                 * HEAD_ADDR) allows. Also handles wrap-around.
                 */
                head &= ring->buffer->size - 1;

                /* This here seems to blow up */
                cmd = ioread32(ring->buffer->virtual_start + head);
                if (cmd == ipehr)
                        break;

                head -= 4;
        }

        if (!i)
                return NULL;

        *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
        if (INTEL_INFO(ring->dev)->gen >= 8) {
                offset = ioread32(ring->buffer->virtual_start + head + 12);
                offset <<= 32;
                offset |= ioread32(ring->buffer->virtual_start + head + 8);
        }

        return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
static int semaphore_passed(struct intel_engine_cs *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
        u32 seqno;

        ring->hangcheck.deadlock++;

        signaller = semaphore_waits_for(ring, &seqno);
        if (signaller == NULL)
                return -1;

        /* Prevent pathological recursion due to driver bugs */
        if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
                return -1;

        if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
                return 1;

        /* cursory check for an unkickable deadlock */
        if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
            semaphore_passed(signaller) < 0)
                return -1;

        return 0;
}
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                ring->hangcheck.deadlock = 0;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;

        if (acthd != ring->hangcheck.acthd) {
                if (acthd > ring->hangcheck.max_acthd) {
                        ring->hangcheck.max_acthd = acthd;
                        return HANGCHECK_ACTIVE;
                }

                return HANGCHECK_ACTIVE_LOOP;
        }

        if (IS_GEN2(dev))
                return HANGCHECK_HUNG;

        /* Is the chip hanging on a WAIT_FOR_EVENT?
         * If so we can simply poke the RB_WAIT bit
         * and break the hang. This should work on
         * all but the second generation chipsets.
         */
        tmp = I915_READ_CTL(ring);
        if (tmp & RING_WAIT) {
                i915_handle_error(dev, false,
                                  "Kicking stuck wait on %s",
                                  ring->name);
                I915_WRITE_CTL(ring, tmp);
                return HANGCHECK_KICK;
        }

        if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
                switch (semaphore_passed(ring)) {
                default:
                        return HANGCHECK_HUNG;
                case 1:
                        i915_handle_error(dev, false,
                                          "Kicking stuck semaphore on %s",
                                          ring->name);
                        I915_WRITE_CTL(ring, tmp);
                        return HANGCHECK_KICK;
                case 0:
                        return HANGCHECK_WAIT;
                }
        }

        return HANGCHECK_HUNG;
}
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track per ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. On stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
        struct drm_device *dev = (struct drm_device *)data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
        int i;
        int busy_count = 0, rings_hung = 0;
        bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

        if (!i915.enable_hangcheck)
                return;

        for_each_ring(ring, dev_priv, i) {
                u64 acthd;
                u32 seqno;
                bool busy = true;

                semaphore_clear_deadlocks(dev_priv);

                seqno = ring->get_seqno(ring, false);
                acthd = intel_ring_get_active_head(ring);

                if (ring->hangcheck.seqno == seqno) {
                        if (ring_idle(ring, seqno)) {
                                ring->hangcheck.action = HANGCHECK_IDLE;

                                if (waitqueue_active(&ring->irq_queue)) {
                                        /* Issue a wake-up to catch stuck h/w. */
                                        if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
                                                if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
                                                        DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
                                                                  ring->name);
                                                else
                                                        DRM_INFO("Fake missed irq on %s\n",
                                                                 ring->name);
                                                wake_up_all(&ring->irq_queue);
                                        }
                                        /* Safeguard against driver failure */
                                        ring->hangcheck.score += BUSY;
                                } else
                                        busy = false;
                        } else {
                                /* We always increment the hangcheck score
                                 * if the ring is busy and still processing
                                 * the same request, so that no single request
                                 * can run indefinitely (such as a chain of
                                 * batches). The only time we do not increment
                                 * the hangcheck score on this ring, if this
                                 * ring is in a legitimate wait for another
                                 * ring. In that case the waiting ring is a
                                 * victim and we want to be sure we catch the
                                 * right culprit. Then every time we do kick
                                 * the ring, add a small increment to the
                                 * score so that we can catch a batch that is
                                 * being repeatedly kicked and so responsible
                                 * for stalling the machine.
                                 */
                                ring->hangcheck.action = ring_stuck(ring,
                                                                    acthd);

                                switch (ring->hangcheck.action) {
                                case HANGCHECK_IDLE:
                                case HANGCHECK_WAIT:
                                case HANGCHECK_ACTIVE:
                                        break;
                                case HANGCHECK_ACTIVE_LOOP:
                                        ring->hangcheck.score += BUSY;
                                        break;
                                case HANGCHECK_KICK:
                                        ring->hangcheck.score += KICK;
                                        break;
                                case HANGCHECK_HUNG:
                                        ring->hangcheck.score += HUNG;
                                        stuck[i] = true;
                                        break;
                                }
                        }
                } else {
                        ring->hangcheck.action = HANGCHECK_ACTIVE;

                        /* Gradually reduce the count so that we catch DoS
                         * attempts across multiple batches.
                         */
                        if (ring->hangcheck.score > 0)
                                ring->hangcheck.score--;

                        ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
                }

                ring->hangcheck.seqno = seqno;
                ring->hangcheck.acthd = acthd;
                busy_count += busy;
        }

        for_each_ring(ring, dev_priv, i) {
                if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
                        DRM_INFO("%s on %s\n",
                                 stuck[i] ? "stuck" : "no progress",
                                 ring->name);
                        rings_hung++;
                }
        }

        if (rings_hung)
                return i915_handle_error(dev, true, "Ring hung");

        if (busy_count)
                /* Reset timer case chip hangs without another request
                 * being added */
                i915_queue_hangcheck(dev);
}
void i915_queue_hangcheck(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        if (!i915.enable_hangcheck)
                return;

        mod_timer(&dev_priv->gpu_error.hangcheck_timer,
                  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
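/*
 * Interrupt reset/preinstall helpers: everything below masks and clears
 * the various IMR/IER/IIR banks so that no stale event can fire while the
 * postinstall hooks set up the real masks.
 */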
static void ibx_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_PCH_NOP(dev))
                return;

        GEN5_IRQ_RESET(SDE);

        if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
                I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_PCH_NOP(dev))
                return;

        WARN_ON(I915_READ(SDEIER) != 0);
        I915_WRITE(SDEIER, 0xffffffff);
        POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        GEN5_IRQ_RESET(GT);
        if (INTEL_INFO(dev)->gen >= 6)
                GEN5_IRQ_RESET(GEN6_PM);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(HWSTAM, 0xffffffff);

        GEN5_IRQ_RESET(DE);
        if (IS_GEN7(dev))
                I915_WRITE(GEN7_ERR_INT, 0xffffffff);

        gen5_gt_irq_reset(dev);

        ibx_irq_reset(dev);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;

        /* VLV magic */
        I915_WRITE(VLV_IMR, 0);
        I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
        I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
        I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

        /* and GT */
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIIR, I915_READ(GTIIR));

        gen5_gt_irq_reset(dev);

        I915_WRITE(DPINVGTT, 0xff);

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, 0xffffffff);
        I915_WRITE(VLV_IER, 0x0);
        POSTING_READ(VLV_IER);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
        GEN8_IRQ_RESET_NDX(GT, 0);
        GEN8_IRQ_RESET_NDX(GT, 1);
        GEN8_IRQ_RESET_NDX(GT, 2);
        GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;

        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);

        gen8_gt_irq_reset(dev_priv);

        for_each_pipe(dev_priv, pipe)
                if (intel_display_power_enabled(dev_priv,
                                                POWER_DOMAIN_PIPE(pipe)))
                        GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

        GEN5_IRQ_RESET(GEN8_DE_PORT_);
        GEN5_IRQ_RESET(GEN8_DE_MISC_);
        GEN5_IRQ_RESET(GEN8_PCU_);

        ibx_irq_reset(dev);
}
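/*
 * The pipe B/C interrupt registers sit in the display power well and lose
 * their contents while it is off; re-init them after the well comes back
 * up.
 */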
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
                          ~dev_priv->de_irq_mask[PIPE_B]);
        GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
                          ~dev_priv->de_irq_mask[PIPE_C]);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void cherryview_irq_preinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;

        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);

        gen8_gt_irq_reset(dev_priv);

        GEN5_IRQ_RESET(GEN8_PCU_);

        POSTING_READ(GEN8_PCU_IIR);

        I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);

        I915_WRITE(VLV_IMR, 0xffffffff);
        I915_WRITE(VLV_IER, 0x0);
        I915_WRITE(VLV_IIR, 0xffffffff);
        POSTING_READ(VLV_IIR);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *intel_encoder;
        u32 hotplug_irqs, hotplug, enabled_irqs = 0;

        if (HAS_PCH_IBX(dev)) {
                hotplug_irqs = SDE_HOTPLUG_MASK;
                for_each_intel_encoder(dev, intel_encoder)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
                                enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
        } else {
                hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
                for_each_intel_encoder(dev, intel_encoder)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
                                enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
        }

        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

        /*
         * Enable digital hotplug on the PCH, and configure the DP short pulse
         * duration to 2ms (which is the minimum in the Display Port spec)
         *
         * This register is the same on all known PCH chips.
         */
        hotplug = I915_READ(PCH_PORT_HOTPLUG);
        hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
        hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
        hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
        hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 mask;

        if (HAS_PCH_NOP(dev))
                return;

        if (HAS_PCH_IBX(dev))
                mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
        else
                mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

        GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
        I915_WRITE(SDEIMR, ~mask);
}
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pm_irqs, gt_irqs;

        pm_irqs = gt_irqs = 0;

        dev_priv->gt_irq_mask = ~0;
        if (HAS_L3_DPF(dev)) {
                /* L3 parity interrupt is always unmasked. */
                dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
                gt_irqs |= GT_PARITY_ERROR(dev);
        }

        gt_irqs |= GT_RENDER_USER_INTERRUPT;
        if (IS_GEN5(dev)) {
                gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
                           ILK_BSD_USER_INTERRUPT;
        } else {
                gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
        }

        GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

        if (INTEL_INFO(dev)->gen >= 6) {
                pm_irqs |= dev_priv->pm_rps_events;

                if (HAS_VEBOX(dev))
                        pm_irqs |= PM_VEBOX_USER_INTERRUPT;

                dev_priv->pm_irq_mask = 0xffffffff;
                GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
        }
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
        unsigned long irqflags;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 display_mask, extra_mask;

        if (INTEL_INFO(dev)->gen >= 7) {
                display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
                                DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
                                DE_PLANEB_FLIP_DONE_IVB |
                                DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
                extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
                              DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
        } else {
                display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                                DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
                                DE_AUX_CHANNEL_A |
                                DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
                                DE_POISON);
                extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
                                DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
        }

        dev_priv->irq_mask = ~display_mask;

        I915_WRITE(HWSTAM, 0xeffe);

        ibx_irq_pre_postinstall(dev);

        GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

        gen5_gt_irq_postinstall(dev);

        ibx_irq_postinstall(dev);

        if (IS_IRONLAKE_M(dev)) {
                /* Enable PCU event interrupts
                 *
                 * spinlocking not required here for correctness since interrupt
                 * setup is guaranteed to run in single-threaded context. But we
                 * need it to make the assert_spin_locked happy. */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }

        return 0;
}
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
        u32 pipestat_mask;
        u32 iir_mask;

        pipestat_mask = PIPESTAT_INT_STATUS_MASK |
                        PIPE_FIFO_UNDERRUN_STATUS;

        I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
        I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
        POSTING_READ(PIPESTAT(PIPE_A));

        pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
                        PIPE_CRC_DONE_INTERRUPT_STATUS;

        i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
                                               PIPE_GMBUS_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);

        iir_mask = I915_DISPLAY_PORT_INTERRUPT |
                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
        dev_priv->irq_mask &= ~iir_mask;

        I915_WRITE(VLV_IIR, iir_mask);
        I915_WRITE(VLV_IIR, iir_mask);
        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
        I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
        POSTING_READ(VLV_IER);
}
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
        u32 pipestat_mask;
        u32 iir_mask;

        iir_mask = I915_DISPLAY_PORT_INTERRUPT |
                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;

        dev_priv->irq_mask |= iir_mask;
        I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
        I915_WRITE(VLV_IIR, iir_mask);
        I915_WRITE(VLV_IIR, iir_mask);
        POSTING_READ(VLV_IIR);

        pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
                        PIPE_CRC_DONE_INTERRUPT_STATUS;

        i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
                                                PIPE_GMBUS_INTERRUPT_STATUS);
        i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);

        pipestat_mask = PIPESTAT_INT_STATUS_MASK |
                        PIPE_FIFO_UNDERRUN_STATUS;
        I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
        I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
        POSTING_READ(PIPESTAT(PIPE_A));
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->display_irqs_enabled)
                return;

        dev_priv->display_irqs_enabled = true;

        if (dev_priv->dev->irq_enabled)
                valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (!dev_priv->display_irqs_enabled)
                return;

        dev_priv->display_irqs_enabled = false;

        if (dev_priv->dev->irq_enabled)
                valleyview_display_irqs_uninstall(dev_priv);
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        dev_priv->irq_mask = ~0;

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        POSTING_READ(PORT_HOTPLUG_EN);

        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
        I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
        I915_WRITE(VLV_IIR, 0xffffffff);
        POSTING_READ(VLV_IER);

        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        if (dev_priv->display_irqs_enabled)
                valleyview_display_irqs_install(dev_priv);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IIR, 0xffffffff);

        gen5_gt_irq_postinstall(dev);

        /* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
        I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
        I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

        I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

        return 0;
}
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
        /* These are interrupts we'll toggle with the ring mask register */
        uint32_t gt_interrupts[] = {
                GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
                        GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
                        GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
                0,
                GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
                        GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
                };

        dev_priv->pm_irq_mask = 0xffffffff;
        GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
        GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
        GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
        GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
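/*
 * Display engine postinstall for gen8: flip-done, CRC-done and fault bits
 * are unmasked on every pipe whose power domain is up; vblank and FIFO
 * underrun are wired up in IER but stay masked in IMR until needed.
 */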
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
        uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
                GEN8_PIPE_CDCLK_CRC_DONE |
                GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
        uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
                GEN8_PIPE_FIFO_UNDERRUN;
        int pipe;

        dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
        dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
        dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

        for_each_pipe(dev_priv, pipe)
                if (intel_display_power_enabled(dev_priv,
                                POWER_DOMAIN_PIPE(pipe)))
                        GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
                                          dev_priv->de_irq_mask[pipe],
                                          de_pipe_enables);

        GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}
static int gen8_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        ibx_irq_pre_postinstall(dev);

        gen8_gt_irq_postinstall(dev_priv);
        gen8_de_irq_postinstall(dev_priv);

        ibx_irq_postinstall(dev);

        I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
        POSTING_READ(GEN8_MASTER_IRQ);

        return 0;
}
static int cherryview_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
                PIPE_CRC_DONE_INTERRUPT_STATUS;
        unsigned long irqflags;
        int pipe;

        /*
         * Leave vblank interrupts masked initially.  enable/disable will
         * toggle them based on usage.
         */
        dev_priv->irq_mask = ~enable_mask;

        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        for_each_pipe(dev_priv, pipe)
                i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
        I915_WRITE(VLV_IER, enable_mask);

        gen8_gt_irq_postinstall(dev_priv);

        I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
        POSTING_READ(GEN8_MASTER_IRQ);

        return 0;
}
static void gen8_irq_uninstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv)
                return;

        gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        int pipe;

        if (!dev_priv)
                return;

        I915_WRITE(VLV_MASTER_IER, 0);

        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);

        I915_WRITE(HWSTAM, 0xffffffff);
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        if (dev_priv->display_irqs_enabled)
                valleyview_display_irqs_uninstall(dev_priv);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        dev_priv->irq_mask = 0;

        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, 0xffffffff);
        I915_WRITE(VLV_IER, 0x0);
        POSTING_READ(VLV_IER);
}
static void cherryview_irq_uninstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;

        if (!dev_priv)
                return;

        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);

#define GEN8_IRQ_FINI_NDX(type, which)                          \
do {                                                            \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff);       \
        I915_WRITE(GEN8_##type##_IER(which), 0);                \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);       \
        POSTING_READ(GEN8_##type##_IIR(which));                 \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);       \
} while (0)

#define GEN8_IRQ_FINI(type)                             \
do {                                                    \
        I915_WRITE(GEN8_##type##_IMR, 0xffffffff);      \
        I915_WRITE(GEN8_##type##_IER, 0);               \
        I915_WRITE(GEN8_##type##_IIR, 0xffffffff);      \
        POSTING_READ(GEN8_##type##_IIR);                \
        I915_WRITE(GEN8_##type##_IIR, 0xffffffff);      \
} while (0)

        GEN8_IRQ_FINI_NDX(GT, 0);
        GEN8_IRQ_FINI_NDX(GT, 1);
        GEN8_IRQ_FINI_NDX(GT, 2);
        GEN8_IRQ_FINI_NDX(GT, 3);

        GEN8_IRQ_FINI(PCU);

#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);

        I915_WRITE(VLV_IMR, 0xffffffff);
        I915_WRITE(VLV_IER, 0x0);
        I915_WRITE(VLV_IIR, 0xffffffff);
        POSTING_READ(VLV_IIR);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv)
                return;

        ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;

        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE16(IMR, 0xffff);
        I915_WRITE16(IER, 0x0);
        POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        I915_WRITE16(EMR,
                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
                  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
        I915_WRITE16(IMR, dev_priv->irq_mask);

        I915_WRITE16(IER,
                     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
                     I915_USER_INTERRUPT);
        POSTING_READ16(IER);

        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
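/*
 * A sketch of the flip life cycle that the ISR check above relies on,
 * per the comment in i8xx_handle_vblank():
 *
 *	MI_DISPLAY_FLIP issued:    ISR.PendingFlip = 1, IIR latch set
 *	flip completes at vblank:  ISR.PendingFlip = 0, IIR latch still set
 *
 * "IIR bit set while the ISR bit has dropped" is therefore the
 * signature of a completed flip, sampled at vblank time.
 */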
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
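/*
 * The uninstall path below mirrors preinstall: first clear the PIPESTAT
 * enable bits, then ack any status bits that latched in the meantime.
 * The status bits are write-one-to-clear, which is why writing the
 * register's own value back acks exactly the bits currently set.
 */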
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	unsigned long irqflags;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
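/*
 * The new_iir dance above, spelled out: because MSI only fires on a
 * zero to nonzero IIR transition, the handler re-reads IIR after the
 * ack and loops on the fresh value instead of returning:
 *
 *	I915_WRITE(IIR, iir & ~flip_mask);
 *	new_iir = I915_READ(IIR);	// any bits that landed meanwhile
 *	...
 *	iir = new_iir;			// handled on the next loop pass
 *
 * so an event arriving mid-handler cannot be stranded in IIR.
 */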
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
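/*
 * Note how i915_hpd_irq_setup() builds hotplug_en: each encoder whose
 * pin is currently marked HPD_ENABLED contributes its per-pin enable
 * bit via the hpd_mask_i915[] table, so a pin that has been disabled
 * (e.g. by hotplug storm handling) simply drops out of PORT_HOTPLUG_EN
 * on the next setup pass.
 */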
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
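/*
 * Unlike the gen2/gen3 handlers above, i965_irq_handler() passes the
 * pipe number straight through as the plane number when handling
 * vblanks (i915_handle_vblank(dev, pipe, pipe, iir)); there is no
 * HAS_FBC plane swap here, presumably because plane A stays tied to
 * pipe A on these parts.
 */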
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
static void intel_hpd_irq_reenable(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}
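/*
 * intel_hpd_irq_reenable() runs from hotplug_reenable_work, the delayed
 * work item set up in intel_irq_init() below.  It flips any pin still
 * marked HPD_DISABLED back to HPD_ENABLED, restores each affected
 * connector's polling mode, and reprograms the hardware through
 * hpd_irq_setup.  This is the recovery half of hotplug-storm
 * suppression; the code that marks pins disabled lives elsewhere in
 * this file.
 */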
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	/* Haven't installed the IRQ handler yet */
	dev_priv->pm._irqs_disabled = true;

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
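/*
 * intel_irq_init() only fills in the drm_driver vtable; nothing is
 * written to hardware here.  The DRM core later drives these hooks in
 * order when the IRQ is actually installed, roughly:
 *
 *	dev->driver->irq_preinstall(dev);	// mask and ack everything
 *	request_irq(..., dev->driver->irq_handler, ...);
 *	dev->driver->irq_postinstall(dev);	// unmask what we want
 *
 * which is why every preinstall hook above must fully quiesce the
 * hardware before the handler can run.
 */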
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev->driver->irq_uninstall(dev);
	dev_priv->pm._irqs_disabled = true;
}

/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->pm._irqs_disabled = false;
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}
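/*
 * These two helpers reuse the driver's own vtable hooks rather than
 * going through the DRM core's install/uninstall path: disable simply
 * runs the uninstall hook, while restore replays preinstall followed by
 * postinstall.  The _irqs_disabled flag brackets the window in which
 * interrupt delivery cannot be assumed, for the benefit of the driver's
 * internal asserts.
 */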