/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
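/*
 * Note on the two helpers above: DEIMR is a *mask* register, so a bit that
 * is set in dev_priv->irq_mask blocks the corresponding display interrupt.
 * "Enable" therefore clears bits and "disable" sets them; the cached copy
 * in dev_priv->irq_mask saves a read-modify-write of the register and must
 * be updated under dev_priv->irq_lock by all callers.
 */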
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
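/*
 * The PIPESTAT helpers rely on the register's split layout: enable bits
 * live in the high 16 bits and the matching status bits sit 16 bits below
 * them. That is why i915_enable_pipestat() writes
 * pipestat[pipe] | (mask >> 16): the caller passes enable bits, and
 * shifting them down acks any status that was already latched while the
 * event was disabled.
 */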
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
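/*
 * Why the loop above is needed: the low byte of the frame counter is read
 * from PIPEFRAMEPIXEL (where it shares a register with the pixel counter)
 * while the upper bits come from PIPEFRAME. If a frame boundary fell
 * between the two reads, the combined value would mix two different
 * frames. Re-reading the high register until both reads agree guarantees
 * that low was sampled within a single frame, after which
 * (high1 << 8) | low yields a monotonically increasing counter.
 */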
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
			     int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
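/*
 * Note: a negative *vpos from the corrective offset above means "this many
 * lines before the start of active scanout", which is the convention the
 * DRM core's scanout-position based vblank timestamping code expects from
 * this callback.
 */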
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);
}
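/*
 * Reading the clamping above: on Ironlake the delay value scale runs
 * opposite to frequency, so stepping cur_delay - 1 raises performance and
 * ips.max_delay is numerically the *smallest* permitted delay. With that
 * inversion in mind, both branches clamp new_delay back into the
 * [max_delay, min_delay] window before programming it via
 * ironlake_set_drps().
 */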
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies +
			  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->dev->struct_mutex);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since
 * statistically the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    parity_error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
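/*
 * For reference, the uevent emitted above reaches userspace as a
 * KOBJ_CHANGE event on the drm device with an environment roughly like:
 *
 *	L3_PARITY_ERROR=1
 *	ROW=12
 *	BANK=1
 *	SUBBANK=0
 *
 * (the row/bank/subbank values here are illustrative). A udev rule or
 * netlink listener can match on L3_PARITY_ERROR to trigger row remapping.
 */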
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->parity_error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it indicates a bug in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
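/*
 * The IMR/IIR handshake above works as follows: the interrupt handler
 * masks further PM interrupts via GEN6_PMIMR and stashes the IIR bits in
 * dev_priv->rps.pm_iir; gen6_pm_rps_work() later consumes the stashed
 * bits and writes GEN6_PMIMR back to 0. This keeps the relatively
 * expensive re-clocking out of hardirq context without losing events.
 */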
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	bool blc_event;

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		DRM_DEBUG_DRIVER("AUX channel interrupt\n");

	if (pch_iir & SDE_GMBUS_CPT)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			if (pch_iir & SDE_HOTPLUG_MASK_CPT)
				queue_work(dev_priv->wq, &dev_priv->hotplug_work);
			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
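/*
 * The DE_MASTER_IRQ_CONTROL dance above (disable on entry, restore on
 * exit) holds off new display interrupts while the IIR registers are being
 * read and written back to clear, so an event arriving mid-handler
 * re-latches and re-raises the interrupt instead of being lost between the
 * read and the write-to-clear.
 */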
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	u32 hotplug_mask;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (pch_iir & hotplug_mask)
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* should clear PCH hotplug event before clear CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * occurred.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}
/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
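/*
 * Summary of the per-generation layout captured above: gen2/3 expose a
 * single INSTDONE register, gen4-6 add INSTDONE1, and gen7 spreads the
 * information over four registers (GEN7_INSTDONE_1 plus the SC, SAMPLER
 * and ROW variants). The memset keeps the unused slots zero so consumers
 * can iterate over all I915_NUM_INSTDONE_REG entries regardless of gen.
 */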
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
		/* fall through to the gen2 registers */
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
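/*
 * Taken together, the enable/disable pairs above show the three vblank
 * mechanisms this file juggles: gen2-4 toggle PIPESTAT enable bits,
 * Ironlake/Ivybridge route vblank through the DEIMR display-engine mask,
 * and Valleyview needs both a VLV_IMR bit and a PIPESTAT enable.
 */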
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring, false),
			      ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}

static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}

static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->hangcheck_count++ > 1) {
		bool hung = true;

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;
			int i;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);
		}

		return hung;
	}

	return false;
}
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
	struct intel_ring_buffer *ring;
	bool err = false, idle;
	int i;

	if (!i915_enable_hangcheck)
		return;

	memset(acthd, 0, sizeof(acthd));
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		idle &= i915_hangcheck_ring_idle(ring, &err);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (idle) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->hangcheck_count = 0;
		return;
	}

	i915_get_extra_instdone(dev, instdone);
	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
	    memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->hangcheck_count = 0;

		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
		memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
	}

repeat:
	/* Reset timer in case the chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kind of interrupts we always want enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_AUX_MASK);
	}

	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kind of interrupts we always want enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);
	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially. enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
		   GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		   GT_GEN6_BLT_USER_INTERRUPT |
		   GT_GEN6_BSD_USER_INTERRUPT |
		   GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		   GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
		   GT_PIPE_NOTIFY |
		   GT_RENDER_CS_ERROR_INTERRUPT |
		   GT_SYNC_STATUS |
		   GT_USER_INTERRUPT);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}
2089 static void i8xx_irq_preinstall(struct drm_device
* dev
)
2091 drm_i915_private_t
*dev_priv
= (drm_i915_private_t
*) dev
->dev_private
;
2094 atomic_set(&dev_priv
->irq_received
, 0);
2097 I915_WRITE(PIPESTAT(pipe
), 0);
2098 I915_WRITE16(IMR
, 0xffff);
2099 I915_WRITE16(IER
, 0x0);
2100 POSTING_READ16(IER
);

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;
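
	/* Bits that are set in EMR are masked; everything except page-table
	 * and memory-refresh errors stays masked here, so only those two
	 * sources can raise the master error interrupt.
	 */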
	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;
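
	/* Plane-flip bits are deliberately left pending in IIR (masked out
	 * of the ack below) until the matching vblank arrives; only then are
	 * they dropped from flip_mask and acknowledged.
	 */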
	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);
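
			/* 0x8000ffff covers the 16 low status bits plus the
			 * FIFO underrun status latched in bit 31.
			 */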
			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);
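
	/* PORT_HOTPLUG_EN is read back first so that previously programmed
	 * bits are preserved; only the bits for connectors present in
	 * hotplug_supported_mask are OR'ed in below.
	 */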
	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}

static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
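		/* irq_received tracks whether this pass saw anything at all:
		 * IIR bits outside flip_mask, or pipe status bits that can
		 * fire without a corresponding IIR bit (see below).
		 */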
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				if (iir & flip[plane]) {
					intel_prepare_page_flip(dev, plane);
					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
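
	/* The SDVO B/C hotplug status bits sit at different positions on
	 * G4X than on earlier gen4 parts, hence the two register-definition
	 * variants below.
	 */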
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	intel_opregion_enable_asle(dev);

	return 0;
}

static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
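		/* Loop until an iteration turns up neither IIR nor PIPESTAT
		 * activity; each pass acks what it saw and re-reads IIR.
		 */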
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				i915_pageflip_stall_check(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (IS_HASWELL(dev)) {
		/* Share interrupts handling with IVB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
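		/* Pre-Ironlake: pick the gen2/gen3/gen4 legacy paths. */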
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}