/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
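/*
 * Illustrative sketch (not part of the driver): the reset/init macros above
 * pair up. GEN5_IRQ_RESET(DE) expands to roughly the sequence below, first
 * masking everything (IMR), then disabling delivery (IER), then clearing IIR
 * twice with a posting read in between, because IIR can queue two events:
 *
 *	I915_WRITE(DEIMR, 0xffffffff);
 *	POSTING_READ(DEIMR);
 *	I915_WRITE(DEIER, 0);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *
 * GEN5_IRQ_INIT(DE, imr, ier) then asserts DEIIR is still zero before
 * re-enabling delivery, catching interrupts that snuck in while reset.
 */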
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
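/*
 * Illustrative sketch (not part of the driver): the update helpers take an
 * "interrupt_mask" selecting which bits to touch and an "enabled_irq_mask"
 * giving their new state, so with mask = GT_RENDER_USER_INTERRUPT:
 *
 *	ilk_update_gt_irq(dev_priv, mask, mask);	// unmask (enable) the bit
 *	ilk_update_gt_irq(dev_priv, mask, 0);	// mask (disable) the bit
 *
 * Since IMR is a *mask* register, a bit set in gt_irq_mask means the
 * corresponding interrupt is disabled; bits outside interrupt_mask are
 * left untouched.
 */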
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}
static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}
void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	spin_unlock_irq(&dev_priv->irq_lock);
}
void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
		   ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);

	dev_priv->rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
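/*
 * Illustrative sketch (not part of the driver): PIPESTAT packs the enable
 * bits in the high 16 bits and the status bits in the low 16 bits, so for
 * most events the enable bit is simply the status bit shifted up:
 *
 *	u32 status_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
 *	u32 enable_mask = status_mask << 16;
 *
 * vlv_get_pipestat_enable_mask() exists because a few VLV bits (sprite flip
 * done, FIFO underrun, PSR) break this 1:1 high/low layout.
 */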
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       . \hs/ .      \hs/          \hs/          \hs/ .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
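/*
 * Worked example (illustrative, not part of the driver): with vbl_start
 * already converted to a pixel count, suppose the frame counter reads
 * high1 = 0x1, low = 0x23 and the pixel counter sits at or past vbl_start.
 * The cooked counter is then ((0x1 << 8) | 0x23) + 1 = 0x124, i.e. the
 * value is bumped one frame early, at start of vblank rather than at start
 * of active, to match when the vblank interrupt fires.
 */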
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
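/*
 * Illustrative sketch (not part of the driver): the returned position is
 * relative to vblank end. For a mode with vbl_start = 768 and vbl_end =
 * vtotal = 806, a raw scanline of 800 yields 800 - 806 = -6 (inside vblank,
 * counting up towards 0), while a raw scanline of 100 yields
 * 100 + 806 - 806 = 100 (outside vblank, counting up from vbl_end), which
 * is the convention the DRM timestamping helper expects.
 */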
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i))  {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}
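/*
 * Worked example (illustrative, not part of the driver): residency is the
 * C0 time of the busier power well over wall time, both converted to the
 * same unit above. If the busier well spent 40 units in C0 while the
 * evaluation interval covered 50 units, the residency is
 * (40 * 100) / 50 = 80 (percent), which the caller below compares against
 * the RPS up/down thresholds.
 */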
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
			(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
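/*
 * Illustrative sketch (not part of the driver): the uevent sent above
 * reaches userspace (e.g. udev) as a KOBJ_CHANGE event whose environment
 * carries the ROW=, BANK=, SUBBANK= and SLICE= variables built with
 * kasprintf() above, so a daemon can record or remap the failing L3 row.
 */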
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
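/*
 * Illustrative note (not part of the driver): with the values above, a pin
 * that raises more than HPD_STORM_THRESHOLD (5) interrupts within any
 * HPD_STORM_DETECT_PERIOD (1000 ms) window is declared a storm. The per-pin
 * count in intel_hpd_irq_handler() below is reset whenever the previous
 * interrupt is older than the period, otherwise incremented; crossing the
 * threshold flips the pin to HPD_MARK_DISABLED so the hotplug work switches
 * that connector from interrupt-driven detection to polling.
 */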
static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
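/*
 * Illustrative note (not part of the driver): the CRC entries form a
 * power-of-two ring buffer indexed by head/tail, so CIRC_SPACE(head, tail,
 * INTEL_PIPE_CRC_ENTRIES_NR) from <linux/circ_buf.h> reports how many
 * entries can still be written before the reader catches up, and the head
 * advances with a cheap mask instead of a modulo:
 *
 *	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
 */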
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}
static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	/* TODO: RPS on GEN9+ is not supported yet. */
	if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
		      "GEN9+: unexpected RPS IRQ\n"))
		return;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filterered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
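/*
 * The SDEIER dance above, in isolation (a sketch, not extra driver code):
 * while SDEIER is zero no new south-display interrupts are forwarded, so
 * SDEIIR only needs to be read and written once; restoring SDEIER re-asserts
 * the interrupt if SDEIIR latched anything in the meantime, and we simply
 * take another irq for it:
 *
 *	sde_ier = I915_READ(SDEIER);
 *	I915_WRITE(SDEIER, 0);
 *	...read/clear/handle SDEIIR exactly once...
 *	I915_WRITE(SDEIER, sde_ier);
 *	POSTING_READ(SDEIER);
 */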
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;
	u32 aux_mask = GEN8_AUX_CHANNEL_A;

	if (IS_GEN9(dev))
		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	/* Find, clear, then process each source of interrupt */

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;

			if (tmp & aux_mask)
				dp_aux_irq_handler(dev);
			else
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);

			if (pipe_iir & GEN8_PIPE_VBLANK &&
			    intel_pipe_handle_vblank(dev, pipe))
				intel_check_page_flip(dev, pipe);

			if (IS_GEN9(dev))
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

			if (flip_done) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip_plane(dev, pipe);
			}

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);

			if (IS_GEN9(dev))
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

			if (fault_errors)
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
					  pipe_name(pipe),
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(SDEIIR, pch_iir);
			cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}
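/*
 * Pattern note (editorial): every source block in gen8_irq_handler() has the
 * same find/clear/process shape, e.g.:
 *
 *	if (master_ctl & SOURCE_IRQ) {
 *		tmp = I915_READ(SOURCE_IIR);
 *		if (tmp) {
 *			I915_WRITE(SOURCE_IIR, tmp);	- clear before processing
 *			ret = IRQ_HANDLED;
 *			...process tmp...
 *		} else
 *			DRM_ERROR("The master control interrupt lied ...!\n");
 *	}
 *
 * Clearing the IIR first means an event that re-fires mid-processing latches
 * again rather than being lost; the "lied" error flags a master-control bit
 * with no corresponding IIR bits set.
 */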
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}
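/*
 * Editorial note, assuming the reset_counter encoding from i915_drv.h
 * (I915_RESET_IN_PROGRESS_FLAG in the low bit, I915_WEDGED in the top bit):
 * waiters woken here re-read the counter, see the in-progress flag, and
 * drop their locks so the reset work can take them.  A sketch of the
 * waiter-side check this pairs with:
 *
 *	if (i915_reset_in_progress(&dev_priv->gpu_error))
 *		return -EAGAIN;		- back off, retry after the reset
 */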
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * occurred.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	struct drm_i915_private *dev_priv =
		container_of(error, struct drm_i915_private, gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);

		intel_prepare_reset(dev);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_finish_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}
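/*
 * For illustration: assuming the uevent strings from i915_drv.h keep their
 * usual values (I915_ERROR_UEVENT "ERROR", I915_RESET_UEVENT "RESET"), a
 * hang plus successful reset is visible to userspace as:
 *
 *	ERROR=1		- error state captured, dump available in debugfs
 *	RESET=1		- GPU reset is about to start
 *	ERROR=0		- reset completed, waiters released
 */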
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
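/*
 * Bookkeeping sketch for the two gen8 vblank hooks above: de_irq_mask[pipe]
 * is the cached IMR value, so enable/disable only flip GEN8_PIPE_VBLANK in
 * the cache and write it back under irq_lock:
 *
 *	enable:   dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
 *	disable:  dev_priv->de_irq_mask[pipe] |=  GEN8_PIPE_VBLANK;
 *	both:     I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
 */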
static u32
ring_last_seqno(struct intel_engine_cs *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}
static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		/* OR in the low dword so the shifted high dword survives */
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}

	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
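/*
 * Assumed dword layout behind the ioread32()s above (offsets relative to
 * the located semaphore wait command; the gen8+ form is shown, which is why
 * five dwords are scanned there instead of four):
 *
 *	head + 0:	semaphore wait opcode
 *	head + 4:	seqno being waited for (hence the + 1 when reporting)
 *	head + 8:	semaphore address, low dword   (gen8+)
 *	head + 12:	semaphore address, high dword  (gen8+)
 */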
static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
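/*
 * Editorial note on the "kick" above: writing the CTL value we just read
 * back to the ring appears to clear the sticky wait condition (the in-code
 * comment relies on poking RB_WAIT), letting the ring resume without a
 * full GPU reset.  Gen2 is excluded because the trick is documented not to
 * work there.
 */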
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track per ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. On stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring, if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true, "Ring hung");

	if (busy_count)
		/* Reset timer in case the chip hangs without another request
		 * being added */
		i915_queue_hangcheck(dev);
}
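/*
 * Worked scoring example (illustrative; BUSY/KICK/HUNG are the local defines
 * above, and the hung threshold HANGCHECK_SCORE_RING_HUNG comes from
 * i915_drv.h).  With BUSY=1, KICK=5, HUNG=20 and a threshold of 31: a ring
 * diagnosed HANGCHECK_HUNG gains 20 per sample and is declared hung after
 * two samples, a repeatedly kicked ring after seven, while a merely busy
 * ring needs ~31 samples - so a single runaway batch is still caught
 * eventually, and a healthy ring that keeps retiring seqnos has its score
 * decremented back toward zero instead.
 */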
void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;

	if (!i915.enable_hangcheck)
		return;

	/* Don't continually defer the hangcheck, but make sure it is active */
	if (timer_pending(timer))
		return;
	mod_timer(timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
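/*
 * Editorial note: DRM_I915_HANGCHECK_JIFFIES is derived from
 * DRM_I915_HANGCHECK_PERIOD in i915_drv.h (1500 ms at the time of writing),
 * so the "three subsequent calls" heuristic above amounts to several
 * seconds of zero progress before a reset is attempted.
 */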
static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}
/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}
/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	GEN5_IRQ_RESET(VLV_);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	vlv_display_irq_reset(dev_priv);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}
static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	ibx_irq_reset(dev);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;

	spin_lock_irq(&dev_priv->irq_lock);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
			  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
			  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}
static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	vlv_display_irq_reset(dev_priv);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
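/*
 * Layout note (editorial): each gen8 GT IIR/IMR/IER register packs two
 * engines, selected by the GEN8_*_IRQ_SHIFT values from i915_reg.h - e.g.
 * RCS and BCS share gt_interrupts[0], VCS1 and VCS2 share gt_interrupts[1].
 * That is why each array slot above ORs two shifted copies of the same
 * per-engine event bits, and why GT(2) is left to the PM/RPS machinery.
 */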
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	int pipe;
	u32 aux_en = GEN8_AUX_CHANNEL_A;

	if (IS_GEN9(dev_priv)) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;
	} else
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
}
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}
static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_display_irq_reset(dev_priv);

	dev_priv->irq_mask = ~0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}
static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}
static void i8xx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
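/*
 * Timeline sketch of the FlipDone detection described above (illustrative):
 *
 *	MI_DISPLAY_FLIP issued	->  ISR flip-pending = 1, IIR latches it
 *	flip actually completes	->  ISR flip-pending drops to 0
 *	vblank interrupt	->  IIR still set, ISR clear  =>  flip done
 *
 * If ISR is still set at vblank the flip is still outstanding and we fall
 * through to check_page_flip instead.
 */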
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
static void i8xx_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}
static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
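
/*
 * A note on the two masks built in i965_irq_postinstall() above: IMR gates
 * which status bits may latch into IIR, while IER selects which latched
 * bits actually raise the CPU interrupt. Keeping the flip-pending bits
 * unmasked in IMR but clear in IER means they still latch into IIR, where
 * the handler can pick them up while servicing some other interrupt,
 * without generating interrupts of their own. The user interrupt goes the
 * opposite way: enabled in IER, but masked in IMR until a waiter unmasks
 * it. Roughly (an illustrative model, not driver code):
 *
 *	latched  = status & ~IMR;	// what becomes visible in IIR
 *	asserted = IIR & IER;		// what actually interrupts the CPU
 */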

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}
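
/*
 * The work item above is the tail end of the HPD interrupt storm handling:
 * pins that generated too many events get marked HPD_DISABLED elsewhere
 * and their connectors fall back to polling; this work marks them
 * HPD_ENABLED again and reprograms the hardware via ->hpd_irq_setup(). A
 * sketch of how the storm path is expected to schedule it (the workqueue
 * and the two minute delay are assumptions for illustration, not taken
 * from this section):
 *
 *	mod_delayed_work(system_long_wq, &dev_priv->hotplug_reenable_work,
 *			 msecs_to_jiffies(2 * 60 * 1000));
 */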

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
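
/*
 * After intel_irq_init() returns, the drm_driver vtable is populated for
 * the running platform. On a plain gen4/i965 class device, for example,
 * the dispatch above resolves to:
 *
 *	dev->driver->irq_handler        == i965_irq_handler
 *	dev->driver->irq_postinstall    == i965_irq_postinstall
 *	dev_priv->display.hpd_irq_setup == i915_hpd_irq_setup
 */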

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_install(). From this point on hotplug and
 * poll requests can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);

		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled &&
		    I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
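
/*
 * Putting the two stages together, the ordering implied by the kerneldoc
 * comments reads roughly as follows (a sketch of the load path, not a
 * verbatim copy of the driver load code):
 *
 *	intel_irq_init(dev_priv);	// work items, timers, vtables
 *	intel_irq_install(dev_priv);	// request and enable the hardware irq
 *	intel_hpd_init(dev_priv);	// hotplug last, needs live interrupts
 */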

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
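
/*
 * These two helpers are meant to be used as a pair around a low-power
 * transition; a runtime-pm sketch (illustrative only, the real suspend and
 * resume hooks do considerably more):
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);	// before powering down
 *	// ...device in low-power state...
 *	intel_runtime_pm_enable_interrupts(dev_priv);	// after powering up
 */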