/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
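
/*
 * Per-platform lookup tables, indexed by enum hpd_pin, giving the hotplug
 * interrupt bit that corresponds to each hotplug pin. They are handed to
 * intel_hpd_irq_handler() by the platform interrupt handlers below.
 */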
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
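
/*
 * DEIMR bookkeeping: dev_priv->irq_mask mirrors the hardware interrupt mask
 * register, so enabling an interrupt means clearing its bit and disabling
 * means setting it. While package C8 has interrupts disabled, the change is
 * only recorded in pc8.regsave and applied once interrupts are restored.
 */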
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
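
/*
 * On IVB/HSW there is a single error-interrupt mask bit shared by all pipes
 * (and likewise one SERR bit shared by all PCH transcoders on CPT/PPT), so
 * the error interrupt may only be enabled while no pipe or transcoder has
 * FIFO underrun reporting disabled.
 */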
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
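
/*
 * PIPESTAT keeps the interrupt enable bits in the high half of the register
 * and the corresponding status bits in the low half; that is why the helpers
 * below mask with 0x7fff0000 and mirror an enable bit down with mask >> 16 to
 * clear any stale status before enabling.
 */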
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
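
/*
 * G4X and later have a dedicated hardware frame counter register, so no
 * high/low stitching is required there.
 */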
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
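
/*
 * Ironlake frequency stepping (ips): compare the hardware busy up/down
 * averages against the reported thresholds and move the current delay one
 * step towards max or min accordingly.
 */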
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}
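
/*
 * Valleyview top-level interrupt handler: loop until VLV_IIR, GTIIR and
 * GEN6_PMIIR all read back zero, clearing the PIPE*STAT registers before the
 * IIR registers so no edge is lost.
 */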
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port);
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
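
/*
 * Display interrupt dispatch for ILK/SNB: fan the DE IIR bits out to the AUX,
 * ASLE, vblank, flip-done, FIFO underrun and PCH sub-handlers.
 */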
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for (i = 0; i < 3; i++) {
		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
			drm_handle_vblank(dev, i);
		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int i, ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		intel_display_handle_reset(dev);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					i915_gem_obj_ggtt_offset(obj);
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK_ILK(pipe);

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK_ILK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
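
/*
 * Decode the MI_SEMAPHORE_MBOX instruction the ring is stuck on (IPEHR holds
 * the instruction, ACTHD points just past it) to work out which ring is being
 * waited upon and for which seqno.
 */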
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, acthd, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);

	*seqno = ioread32(ring->virtual_start + acthd + 4) + 1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}
static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = false;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (ring->hangcheck.acthd != acthd)
		return HANGCHECK_ACTIVE;

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and,
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, ACTHD is inspected to see if the ring is stuck. If it is, we
 * kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30

	if (!i915_enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 seqno, acthd;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
						  ring->name);
					wake_up_all(&ring->irq_queue);
					ring->hangcheck.score += HUNG;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
					break;
				case HANGCHECK_ACTIVE:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score > FIRE) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true);

	if (busy_count)
		/* Reset timer in case the chip hangs without another request
		 * being added */
		i915_queue_hangcheck(dev);
}
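/*
 * i915_queue_hangcheck() below (re)arms the hangcheck timer one
 * DRM_I915_HANGCHECK_JIFFIES interval into the future.  It is re-armed from
 * i915_hangcheck_elapsed() only while rings remain busy, so the timer stops
 * firing once the GPU goes idle, and it does nothing at all when the
 * i915_enable_hangcheck module parameter is off.
 */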
void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!i915_enable_hangcheck)
		return;

	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
static void ibx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
static void gen5_gt_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		/* and PM */
		I915_WRITE(GEN6_PMIMR, 0xffffffff);
		I915_WRITE(GEN6_PMIER, 0x0);
		POSTING_READ(GEN6_PMIER);
	}
}
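/*
 * A note on the register pattern used throughout these preinstall and
 * postinstall helpers: the IMR registers mask interrupts off, the IER
 * registers enable them towards the CPU, and the IIR registers latch the
 * pending bits.  Writing an IIR register with its own current value (as done
 * with GTIIR, SDEIIR, DEIIR, VLV_IIR, ...) acknowledges and clears whatever
 * is pending, and POSTING_READ() flushes the posted write before we rely on
 * the new state.
 */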
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	gen5_gt_irq_preinstall(dev);

	ibx_irq_preinstall(dev);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	gen5_gt_irq_preinstall(dev);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
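/*
 * Note that enabled_irqs above is rebuilt from the per-pin hpd_mark state:
 * pins that are not currently marked HPD_ENABLED (for example after the
 * hotplug-storm handling has fallen back to polling) stay masked in SDEIMR,
 * even though SDEIER keeps all PCH interrupt sources enabled.
 */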
static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= GEN6_PM_RPS_EVENTS;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		I915_WRITE(GEN6_PMIER, pm_irqs);
		POSTING_READ(GEN6_PMIER);
	}
}
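/*
 * For gen6+ the PM interrupts are enabled in GEN6_PMIER above but left fully
 * masked in GEN6_PMIMR (pm_irq_mask starts out as 0xffffffff); the RPS/PM
 * code unmasks the individual events later, once it actually wants to
 * receive them.
 */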
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
				DE_ERR_INT_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB);

		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
				DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
	}

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | extra_mask);
	POSTING_READ(DEIER);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	unsigned long irqflags;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
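/*
 * The i8xx (gen2) paths use the 16-bit register accessors (I915_WRITE16,
 * I915_READ16, POSTING_READ16) and a 16-bit IIR value in the handler below;
 * the interrupt registers are only 16 bits wide on these chipsets.
 */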
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int pipe, u16 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 0, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
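/*
 * In the handler above, IIR is acknowledged with (iir & ~flip_mask): the
 * plane flip-pending bits are deliberately left pending until
 * i8xx_handle_vblank() reports the corresponding flip as finished, at which
 * point the bit is dropped from flip_mask and gets cleared on the next pass
 * through the loop.
 */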
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
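/*
 * Note: on mobile gen3 parts the plane/pipe mapping is swapped, which is why
 * the handler below computes a separate 'plane' (flipped with IS_MOBILE())
 * and passes both plane and pipe to i915_handle_vblank().
 */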
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								  HOTPLUG_INT_STATUS_G4X :
								  HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
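/*
 * The two helpers below pair up around Package C8:
 * hsw_pc8_disable_interrupts() saves the current DEIMR/SDEIMR/GTIMR/GTIER/
 * GEN6_PMIMR values and masks everything except the PCH event and hotplug
 * bits (so a hotplug can still be noticed while in PC8), and
 * hsw_pc8_restore_interrupts() checks the registers are still in that state
 * before unmasking what was previously enabled.
 */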
/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
	ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val, expected;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	expected = ~DE_PCH_EVENT_IVB;
	WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);

	val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
	expected = ~SDE_HOTPLUG_MASK_CPT;
	WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
	     val, expected);

	val = I915_READ(GTIMR);
	expected = 0xffffffff;
	WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);

	val = I915_READ(GEN6_PMIMR);
	expected = 0xffffffff;
	WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
	     expected);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv,
				     ~dev_priv->pc8.regsave.sdeimr &
				     ~SDE_HOTPLUG_MASK_CPT);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}