/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
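/*
 * Usage sketch for the reset macros above (illustrative; the DE example
 * assumes an ILK-style display engine block):
 *
 *	GEN5_IRQ_RESET(DE);
 *
 * expands to masking everything in DEIMR, zeroing DEIER, and clearing
 * DEIIR twice, since a second event may have latched behind the one
 * being cleared.
 */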
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
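/*
 * Worked example of the IMR update idiom above (illustrative values):
 * with interrupt_mask = 0x3 and enabled_irq_mask = 0x1, the cached mask
 * first drops both bits and then gets bit 1 back, since
 *
 *	~0x1 & 0x3 = 0x2
 *
 * IMR bits mask (disable) interrupts, so bit 0 ends up enabled, bit 1
 * disabled, and bits outside interrupt_mask keep their previous state.
 */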
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
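/*
 * Illustrative note on the derivation above: PIPESTAT keeps status bits
 * in the low 16 bits and the matching enable bits 16 bits higher, so for
 * a status bit like 0x0004 the enable bit is 0x0004 << 16 = 0x00040000.
 * The sprite flip-done and FIFO underrun bits handled explicitly above
 * are the exceptions to this 1:1 pairing.
 */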
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
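/*
 * Worked example for the return expression above (illustrative values):
 * with high1 = 0x1 and low = 0x23 the raw frame count is
 * (0x1 << 8) | 0x23 = 0x123; once the pixel counter has reached
 * vbl_start the comparison contributes 1, crediting the frame already
 * in progress, so the cooked counter reads 0x124.
 */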
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
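/*
 * Illustrative example of the wraparound above: with vtotal = 500 and
 * scanline_offset = 1, a raw PIPEDSL readout of 499 reports as
 * (499 + 1) % 500 = 0, folding the hardware counter's one line of lead
 * back into the [0, vtotal) range.
 */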
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
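/*
 * Worked example of the position normalization above (illustrative
 * values): with vbl_start = 480, vbl_end = 492 and vtotal = 500, a
 * scanline of 485 (inside vblank) becomes 485 - 492 = -7, counting up
 * towards 0 at vbl_end, while a scanline of 100 (active) becomes
 * 100 + (500 - 492) = 108, counting up from vbl_end.
 */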
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}
static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}
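/*
 * Worked example of the residency math above (illustrative values): if
 * 100 ms of wall time elapse and the busier of the render/media wells
 * accumulated the equivalent of 60 ms in C0, the function reports
 * (60 * 100) / 100 = 60 percent residency.
 */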
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (INTEL_INFO(dev_priv->dev)->gen >= 8)
		gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
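/*
 * Illustrative ramp produced by the doubling above: a streak of
 * up-threshold interrupts applies adj = 1, 2, 4, 8, ... (2, 4, 8, ...
 * on CHV, which needs even encode values), so the frequency converges
 * on the softlimit in a few interrupts instead of one step per EI.
 */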
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
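/*
 * Illustrative reading of the two knobs above: a pin is treated as
 * storming once more than HPD_STORM_THRESHOLD (5) interrupts arrive
 * within one HPD_STORM_DETECT_PERIOD (1000 ms) window; a full quiet
 * period resets the per-pin count, see intel_hpd_irq_handler() below.
 */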
static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
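/*
 * Illustrative note on the ring bookkeeping above: INTEL_PIPE_CRC_ENTRIES_NR
 * is assumed to be a power of two, so the head advance
 * (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1) wraps without a division,
 * and CIRC_SPACE() reports how many entries may still be written before
 * the reader side must catch up.
 */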
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filterered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
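/*
 * Gen8 replaces the single DEIER master enable with GEN8_MASTER_IRQ and
 * splits the identity registers per interrupt domain (GT, DE per-pipe,
 * DE port/misc, PCH). The handler below still follows the same strategy
 * as ironlake_irq_handler: mask the master control, then find, clear and
 * process each domain's IIR, then re-enable the master control.
 */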
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	/* Find, clear, then process each source of interrupt */

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_AUX_CHANNEL_A)
				dp_aux_irq_handler(dev);
			else
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);

			if (pipe_iir & GEN8_PIPE_VBLANK &&
			    intel_pipe_handle_vblank(dev, pipe))
				intel_check_page_flip(dev, pipe);

			if (IS_GEN9(dev))
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

			if (flip_done) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip_plane(dev, pipe);
			}

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);

			if (IS_GEN9(dev))
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

			if (fault_errors)
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
					  pipe_name(pipe),
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(SDEIIR, pch_iir);
			cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}
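/*
 * The "master control interrupt lied" errors above fire when a domain bit
 * was set in GEN8_MASTER_IRQ but the corresponding IIR read back as zero;
 * they are diagnostics for the kind of ordering race described in the
 * FIXME(BDW) comment in the PCH branch.
 */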
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	struct drm_i915_private *dev_priv =
		container_of(error, struct drm_i915_private, gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);
		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}
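/*
 * The reset_counter encodes reset state in its low bit: setting
 * I915_RESET_IN_PROGRESS_FLAG makes the counter odd, and the atomic_inc
 * above on success makes it even again, which lets waiters distinguish
 * "reset pending" from "reset completed" with a single atomic_t (a sketch
 * of the waiter side, as used by the seqno wait code elsewhere in the
 * driver):
 *
 *	unsigned reset = atomic_read(&error->reset_counter);
 *	if (reset & I915_RESET_IN_PROGRESS_FLAG)
 *		back off, drop locks and return -EAGAIN;
 *
 * I915_WEDGED is the terminal "gpu is dead" state set on the failure path.
 */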
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}
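/*
 * Note the two-phase split above: i915_handle_error() may be called from
 * hard irq context, so it only captures error state, flags the reset and
 * wakes waiters; everything that can sleep (uevents, the actual reset,
 * taking modeset locks) is deferred to i915_error_work_func() via
 * schedule_work().
 */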
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_engine_cs *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}
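/*
 * On gen8+ the ring semaphore wait is a separate MI_SEMAPHORE_WAIT
 * instruction, so it is matched above purely by the opcode field in the
 * upper bits of IPEHR; on earlier gens the wait is encoded as
 * MI_SEMAPHORE_MBOX with per-ring sync bits that are masked off before
 * the compare.
 */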
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}

	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
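/*
 * Layout of the semaphore command scanned above: the dword following the
 * wait command holds the seqno operand (hence the +1 to get the first
 * seqno that would satisfy the wait), and on gen8+ the next two dwords
 * hold the low and high halves of the signaller's 64-bit GGTT offset.
 */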
static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track per ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when
				 * the ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true, "Ring hung");

	if (busy_count)
		/* Reset timer in case chip hangs without another request
		 * being added */
		i915_queue_hangcheck(dev);
}
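/*
 * The scoring above weighs events so that a single stuck batch is caught
 * quickly while legitimate waits are not: BUSY ticks accumulate slowly,
 * KICK penalizes rings that repeatedly need their wait broken, and HUNG
 * pushes the score past HANGCHECK_SCORE_RING_HUNG within a few checks.
 * Rings that make progress decay their score back towards zero.
 */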
void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (!i915.enable_hangcheck)
		return;

	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
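/*
 * round_jiffies_up() aligns the hangcheck timer to the next whole-second
 * boundary so that it can fire together with other coarse timers in the
 * system instead of waking the CPU on its own.
 */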
static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}
/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	gen5_gt_irq_reset(dev);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	GEN5_IRQ_RESET(VLV_);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}
static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	ibx_irq_reset(dev);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;

	spin_lock_irq(&dev_priv->irq_lock);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
			  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
			  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}
static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	GEN5_IRQ_RESET(VLV_);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= dev_priv->pm_rps_events;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
					       PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
						PIPE_GMBUS_INTERRUPT_STATUS);
	i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;
	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	int pipe;

	if (IS_GEN9(dev_priv))
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
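/*
 * Ordering matters in the postinstall above: SDEIER is armed first (see
 * ibx_irq_pre_postinstall), all per-domain IMR/IER pairs are programmed
 * while the master control is still clear, and GEN8_MASTER_IRQ is written
 * last so no interrupt can be delivered half-configured.
 */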
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
		PIPE_CRC_DONE_INTERRUPT_STATUS;
	int pipe;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = ~enable_mask;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	dev_priv->irq_mask = 0;

	GEN5_IRQ_RESET(VLV_);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	GEN5_IRQ_RESET(VLV_);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
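/*
 * Gen2 has 16-bit interrupt registers, hence the I915_WRITE16/I915_READ16
 * accessors used throughout the i8xx functions; the PIPESTAT registers are
 * still accessed as full 32-bit MMIO, as on later generations.
 */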
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
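/*
 * Sketch of the masking convention above (illustration, not driver code):
 * IMR gates which events reach IIR, IER gates which IIR bits assert the
 * interrupt line. The flip-pending bits are kept out of the default enable
 * mask here; the handler additionally ignores them via flip_mask when
 * deciding whether another pass is needed:
 *
 *	enable_mask  = ~dev_priv->irq_mask;
 *	enable_mask &= ~(both DISPLAY_PLANE_*_FLIP_PENDING bits);
 *	enable_mask |= I915_USER_INTERRUPT;	 ring seqno notifications
 */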
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
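/*
 * Worked example (hypothetical configuration): with only the CRT pin marked
 * HPD_ENABLED in hpd_stats, the loop above leaves
 *
 *	hotplug_en = hpd_mask_i915[HPD_CRT]		 CRT_HOTPLUG_INT_EN
 *		   | CRT_HOTPLUG_ACTIVATION_PERIOD_64	 (G4X only)
 *		   | CRT_HOTPLUG_VOLTAGE_COMPARE_50;
 */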
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * a stray interrupt.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
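/*
 * Note on the 0x8000ffff mask used above and in the uninstall paths: the low
 * 16 bits of PIPESTAT are the write-1-to-clear status bits and bit 31 is
 * PIPE_FIFO_UNDERRUN_STATUS, so the mask acks every latched event. The
 * latch-and-clear runs under irq_lock because the enable half of PIPESTAT is
 * also updated from process context (e.g. via i915_enable_pipestat()).
 */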
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}
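/*
 * Context note (not new driver logic): this delayed work is the recovery
 * half of the HPD storm handling. A pin that was parked in HPD_DISABLED is
 * flipped back to HPD_ENABLED and its connectors are switched from polled
 * detection back to interrupt-driven hotplug, before hpd_irq_setup()
 * rewrites the hardware enable bits under irq_lock.
 */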
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
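/*
 * Worked example (illustration only): on a gen3 part the chain above
 * resolves to the i915_* vtable entries,
 *
 *	dev->driver->irq_preinstall	= i915_irq_preinstall;
 *	dev->driver->irq_postinstall	= i915_irq_postinstall;
 *	dev->driver->irq_handler	= i915_irq_handler;
 *	dev->driver->irq_uninstall	= i915_irq_uninstall;
 *	dev_priv->display.hpd_irq_setup	= i915_hpd_irq_setup;
 *
 * together with the 24-bit hardware frame counter (max_vblank_count =
 * 0xffffff).
 */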
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled &&
		    I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}
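/*
 * Note (illustration, not driver code): after this runs, every pin is back
 * to HPD_ENABLED with a zeroed storm counter, and each connector either uses
 *
 *	connector->polled = DRM_CONNECTOR_POLL_HPD;	 hpd-capable or MST
 *
 * or keeps whatever polling mode intel_connector->polled requested.
 */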
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
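/*
 * Typical load-time ordering implied by the kernel-doc above (a sketch, not
 * a verbatim quote of the load path):
 *
 *	intel_irq_init(dev_priv);	 vtables, work items, timers
 *	intel_irq_install(dev_priv);	 request the irq via drm_irq_install()
 *	...				 init steps that need working irqs
 *	intel_hpd_init(dev_priv);	 finally enable hotplug handling
 */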
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
}
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
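/*
 * Suspend/resume pairing (illustrative): these two helpers are meant to be
 * used symmetrically around a low-power transition, e.g.
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	... device in a low-power state ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 *
 * Re-enabling goes through the driver's irq_preinstall/irq_postinstall
 * hooks, so the full IMR/IER programming is redone from scratch rather than
 * restored from saved register state.
 */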