/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};
static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};
static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
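/*
 * Added commentary, not from the original file: intel_hpd_irq_handler()
 * below walks one of these tables by HPD pin to match a platform-specific
 * trigger bit; e.g. on IBX a set SDE_PORTB_HOTPLUG bit in SDEIIR matches
 * hpd_ibx[HPD_PORT_B].
 */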
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!ivb_can_enable_err_int(dev))
			return;

		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
					 ERR_INT_FIFO_UNDERRUN_B |
					 ERR_INT_FIFO_UNDERRUN_C);

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
	}
}
static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
					    bool enable)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
						SDE_TRANSB_FIFO_UNDER;

	if (enable)
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
	else
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);

	POSTING_READ(SDEIMR);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!cpt_can_enable_serr_int(dev))
			return;

		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
				     SERR_INT_TRANS_C_FIFO_UNDERRUN);

		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
	} else {
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
	}

	POSTING_READ(SDEIMR);
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe p;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	unsigned long flags;
	bool ret;

	if (HAS_PCH_LPT(dev)) {
		crtc = NULL;
		for_each_pipe(p) {
			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
				crtc = c;
				break;
			}
		}
		if (!crtc) {
			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
			return false;
		}
	} else {
		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	}
	intel_crtc = to_intel_crtc(crtc);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}
/* Legacy way of handling PM interrupts */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int i;
	bool storm_detected = false;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
/* Unlike gen6_queue_rps_work() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
	if (dev_priv->rps.pm_iir) {
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		/* TODO: if queue_work is slow, move it out of the spinlock */
		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port);
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	if (IS_HASWELL(dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler. */
	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_ERR_INT_IVB)
			ivb_err_int_handler(dev);

		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_asle_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (IS_HASWELL(dev))
			hsw_pm_irq_handler(dev_priv, pm_iir);
		else if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		if (ivb_can_enable_err_int(dev))
			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };
	int i, ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		intel_display_handle_reset(dev);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}
/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
			break;
		}
	}
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, acthd, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);

	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}
static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = false;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (ring->hangcheck.acthd != acthd)
		return active;

	if (IS_GEN2(dev))
		return hung;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return kick;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return hung;
		case 1:
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return kick;
		case 0:
			return wait;
		}
	}

	return hung;
}
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track per ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. On stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume chip is wedged and try to fix it by resetting the chip.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30

	if (!i915_enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 seqno, acthd;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
						  ring->name);
					wake_up_all(&ring->irq_queue);
					ring->hangcheck.score += HUNG;
				} else
					busy = false;
			} else {
				int score;

				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case wait:
					score = 0;
					break;
				case active:
					score = BUSY;
					break;
				case kick:
					score = KICK;
					break;
				case hung:
					score = HUNG;
					stuck[i] = true;
					break;
				}
				ring->hangcheck.score += score;
			}
		} else {
			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score > FIRE) {
			DRM_ERROR("%s on %s\n",
				  stuck[i] ? "stuck" : "no progress",
				  ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true);

	if (busy_count)
		/* Reset timer in case chip hangs without another request
		 * being added */
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies +
					   DRM_I915_HANGCHECK_JIFFIES));
}
static void ibx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	ibx_irq_preinstall(dev);
}

static void ivybridge_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* Power management */
	I915_WRITE(GEN6_PMIMR, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0x0);
	POSTING_READ(GEN6_PMIER);

	ibx_irq_preinstall(dev);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 mask = ~I915_READ(SDEIMR);
	u32 hotplug;

	if (HAS_PCH_IBX(dev)) {
		mask &= ~SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		mask &= ~SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_cpt[intel_encoder->hpd_pin];
	}

	I915_WRITE(SDEIMR, ~mask);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;

	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kind of interrupts we always want on */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
			   DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
	u32 gt_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask |
			  DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT;

	if (IS_GEN6(dev))
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;

	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kind of interrupts we always want on */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB |
		DE_ERR_INT_IVB;
	u32 pm_irqs = GEN6_PM_RPS_EVENTS;
	u32 gt_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
		  GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
	if (HAS_VEBOX(dev))
		pm_irqs |= PM_VEBOX_USER_INTERRUPT |
			   PM_VEBOX_CS_ERROR_INTERRUPT;

	/* Our enable/disable rps functions may touch these registers so
	 * make sure to set a known state for only the non-RPS bits.
	 * The RMW is extra paranoia since this should be called after being set
	 * to a known state in preinstall.
	 */
	I915_WRITE(GEN6_PMIMR,
		   (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
	I915_WRITE(GEN6_PMIER,
		   (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
	POSTING_READ(GEN6_PMIER);

	ibx_irq_postinstall(dev);

	return 0;
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 gt_irqs;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
		GT_BLT_USER_INTERRUPT;
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int pipe, u16 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 0, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
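/*
 * Editor's note on the ordering in the handler above: PIPESTAT is read and
 * acked under irq_lock *before* IIR is written, because the pipe-event bit
 * in IIR is only re-asserted by a fresh 0->1 transition of the underlying
 * PIPESTAT bit.  Acking IIR first would leave a window where a new PIPESTAT
 * event sets no new IIR bit and is never serviced; the "Can't rely on
 * pipestat interrupt bit in iir" comment is about exactly this race.
 */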
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				intel_hpd_irq_handler(dev, hotplug_trigger,
						      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ivybridge_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}