/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);
}
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->gpu_error.hangcheck_count = 0;
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    gpu_error.work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->gpu_error.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev)) {
			atomic_set(&dev_priv->gpu_error.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->gpu_error.completion);
	}
}
/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *)offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	default:
		BUG();
	}
}
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->gpu_error.completion);
		atomic_set(&dev_priv->gpu_error.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring, false),
			      ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}
static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}
static bool i915_hangcheck_hung(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->gpu_error.hangcheck_count++ > 1) {
		bool hung = true;

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);

		if (!IS_GEN2(dev)) {
			struct intel_ring_buffer *ring;
			int i;

			/* Is the chip hanging on a WAIT_FOR_EVENT?
			 * If so we can simply poke the RB_WAIT bit
			 * and break the hang. This should work on
			 * all but the second generation chipsets.
			 */
			for_each_ring(ring, dev_priv, i)
				hung &= !kick_ring(ring);
		}

		return hung;
	}

	return false;
}
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
	struct intel_ring_buffer *ring;
	bool err = false, idle;
	int i;

	if (!i915_enable_hangcheck)
		return;

	memset(acthd, 0, sizeof(acthd));
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		idle &= i915_hangcheck_ring_idle(ring, &err);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (idle) {
		if (err) {
			if (i915_hangcheck_hung(dev))
				return;

			goto repeat;
		}

		dev_priv->gpu_error.hangcheck_count = 0;
		return;
	}

	i915_get_extra_instdone(dev, instdone);
	if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
		   sizeof(acthd)) == 0 &&
	    memcmp(dev_priv->gpu_error.prev_instdone, instdone,
		   sizeof(instdone)) == 0) {
		if (i915_hangcheck_hung(dev))
			return;
	} else {
		dev_priv->gpu_error.hangcheck_count = 0;

		memcpy(dev_priv->gpu_error.last_acthd, acthd,
		       sizeof(acthd));
		memcpy(dev_priv->gpu_error.prev_instdone, instdone,
		       sizeof(instdone));
	}

repeat:
	/* Reset timer in case chip hangs without another request being added */
	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */

static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kind of interrupts we always want enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A;
	u32 render_irqs;
	u32 hotplug_mask;
	u32 pch_irq_mask;

	dev_priv->irq_mask = ~display_mask;

	/* these should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT |
				SDE_GMBUS_CPT |
				SDE_AUX_MASK_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_GMBUS |
				SDE_AUX_MASK);
	}

	pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kind of interrupts we always want enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB;
	u32 render_irqs;
	u32 hotplug_mask;
	u32 pch_irq_mask;

	dev_priv->irq_mask = ~display_mask;

	/* these should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT |
			SDE_GMBUS_CPT |
			SDE_AUX_MASK_CPT);
	pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
static void valleyview_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
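
/*
 * In the i8xx/i915 handlers below, the plane flip-pending bits are kept out
 * of both the IIR ack and the loop condition via ~flip_mask: a flip-pending
 * bit is only dropped from flip_mask (and therefore acked) once the vblank
 * that completes that flip has been handled.
 */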
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	unsigned long irqflags;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	intel_opregion_enable_asle(dev);

	return 0;
}
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				if (iir & flip[plane]) {
					intel_prepare_page_flip(dev, plane);
					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	intel_opregion_enable_asle(dev);

	return 0;
}
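
/*
 * Note that the postinstall above leaves PORT_HOTPLUG_EN at 0; the hotplug
 * detect enable bits are only programmed later via the per-platform
 * ->hpd_irq_setup hook below (invoked from intel_hpd_init() at the end of
 * this file), presumably so that detection is not armed before an interrupt
 * handler is in place to service it.
 */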
static void i965_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				i915_pageflip_stall_check(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
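
/*
 * intel_hpd_init() below is the second stage of interrupt setup: it is meant
 * to run once the irq handler has been installed, and simply invokes
 * whichever hpd_irq_setup hook intel_irq_init() selected above, if any.
 */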
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}
);