drm/i915: Keep the CRC values in a circular buffer
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d537b6151495427a7d1a95e5f39a88eab19df6eb..73d76af13ed438ccf2460f8fce8084ccc5b8be8b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -30,6 +30,7 @@
 
 #include <linux/sysrq.h>
 #include <linux/slab.h>
+#include <linux/circ_buf.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -85,6 +86,12 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        assert_spin_locked(&dev_priv->irq_lock);
 
+       if (dev_priv->pc8.irqs_disabled) {
+               WARN(1, "IRQs disabled\n");
+               dev_priv->pc8.regsave.deimr &= ~mask;
+               return;
+       }
+
        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -97,6 +104,12 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        assert_spin_locked(&dev_priv->irq_lock);
 
+       if (dev_priv->pc8.irqs_disabled) {
+               WARN(1, "IRQs disabled\n");
+               dev_priv->pc8.regsave.deimr |= mask;
+               return;
+       }
+
        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -104,6 +117,85 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }
 
+/**
+ * ilk_update_gt_irq - update GTIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
+                             uint32_t interrupt_mask,
+                             uint32_t enabled_irq_mask)
+{
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       if (dev_priv->pc8.irqs_disabled) {
+               WARN(1, "IRQs disabled\n");
+               dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
+               dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
+                                               interrupt_mask);
+               return;
+       }
+
+       dev_priv->gt_irq_mask &= ~interrupt_mask;
+       dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
+       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+       POSTING_READ(GTIMR);
+}
+
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+       ilk_update_gt_irq(dev_priv, mask, mask);
+}
+
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+       ilk_update_gt_irq(dev_priv, mask, 0);
+}
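
The two-mask convention above is worth spelling out: interrupt_mask selects
which IMR bits may change at all, and enabled_irq_mask selects which of those
end up unmasked. Since a set IMR bit disables the interrupt, the update
reduces to the following standalone sketch:

    static u32 imr_update(u32 imr, u32 interrupt_mask, u32 enabled_irq_mask)
    {
            imr &= ~interrupt_mask;                    /* unmask all selected bits  */
            imr |= ~enabled_irq_mask & interrupt_mask; /* re-mask the disabled ones */
            return imr;
    }

snb_update_pm_irq() below applies the same arithmetic, with the extra
optimization of skipping the register write when the computed value is
unchanged.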
+
+/**
+ * snb_update_pm_irq - update GEN6_PMIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
+                             uint32_t interrupt_mask,
+                             uint32_t enabled_irq_mask)
+{
+       uint32_t new_val;
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       if (dev_priv->pc8.irqs_disabled) {
+               WARN(1, "IRQs disabled\n");
+               dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
+               dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
+                                                    interrupt_mask);
+               return;
+       }
+
+       new_val = dev_priv->pm_irq_mask;
+       new_val &= ~interrupt_mask;
+       new_val |= (~enabled_irq_mask & interrupt_mask);
+
+       if (new_val != dev_priv->pm_irq_mask) {
+               dev_priv->pm_irq_mask = new_val;
+               I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
+               POSTING_READ(GEN6_PMIMR);
+       }
+}
+
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+       snb_update_pm_irq(dev_priv, mask, mask);
+}
+
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+       snb_update_pm_irq(dev_priv, mask, 0);
+}
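
Both pairs of wrappers must be called with dev_priv->irq_lock held, matching
the assert_spin_locked() in the update helpers; a typical irq-context caller
(sketch, mirroring gen6_rps_irq_handler() later in this patch) looks like:

    spin_lock(&dev_priv->irq_lock);
    dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
    snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
    spin_unlock(&dev_priv->irq_lock);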
+
 static bool ivb_can_enable_err_int(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -194,6 +286,15 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 
        assert_spin_locked(&dev_priv->irq_lock);
 
+       if (dev_priv->pc8.irqs_disabled &&
+           (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
+               WARN(1, "IRQs disabled\n");
+               dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
+               dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
+                                                interrupt_mask);
+               return;
+       }
+
        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
 }
@@ -418,6 +519,12 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
        }
 }
 
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+       /* Gen2 doesn't have a hardware frame counter */
+       return 0;
+}
+
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
@@ -426,7 +533,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
-       u32 high1, high2, low;
+       u32 high1, high2, low, pixel, vbl_start;
 
        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -434,6 +541,24 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
                return 0;
        }
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               struct intel_crtc *intel_crtc =
+                       to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+               const struct drm_display_mode *mode =
+                       &intel_crtc->config.adjusted_mode;
+
+               vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+       } else {
+               enum transcoder cpu_transcoder =
+                       intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+               u32 htotal;
+
+               htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+               vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
+
+               vbl_start *= htotal;
+       }
+
        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);
 
@@ -444,13 +569,20 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
-               low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
+               low   = I915_READ(low_frame);
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);
 
        high1 >>= PIPE_FRAME_HIGH_SHIFT;
+       pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;
-       return (high1 << 8) | low;
+
+       /*
+        * The frame counter increments at beginning of active.
+        * Cook up a vblank counter by also checking the pixel
+        * counter against vblank start.
+        */
+       return ((high1 << 8) | low) + (pixel >= vbl_start);
 }
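
A worked example of the vblank-counter fixup, with hypothetical 1080p-ish
timings (crtc_htotal = 2200, crtc_vblank_start = 1080): vbl_start becomes
1080 * 2200 = 2376000 pixels, so once the pixel counter reaches the first
vblank line the comparison adds one, making the counter behave as if it
incremented at vblank start rather than at the start of active video:

    vbl_start = 1080 * 2200;               /* 2376000, in pixels                 */
    /* pixel at 2376000 or later in the frame -> (pixel >= vbl_start) == 1       */
    return ((high1 << 8) | low) + (pixel >= vbl_start);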
 
 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -467,37 +599,98 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
        return I915_READ(reg);
 }
 
+static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t status;
+
+       if (IS_VALLEYVIEW(dev)) {
+               status = pipe == PIPE_A ?
+                       I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+                       I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+               return I915_READ(VLV_ISR) & status;
+       } else if (IS_GEN2(dev)) {
+               status = pipe == PIPE_A ?
+                       I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+                       I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+               return I915_READ16(ISR) & status;
+       } else if (INTEL_INFO(dev)->gen < 5) {
+               status = pipe == PIPE_A ?
+                       I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+                       I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+               return I915_READ(ISR) & status;
+       } else if (INTEL_INFO(dev)->gen < 7) {
+               status = pipe == PIPE_A ?
+                       DE_PIPEA_VBLANK :
+                       DE_PIPEB_VBLANK;
+
+               return I915_READ(DEISR) & status;
+       } else {
+               switch (pipe) {
+               default:
+               case PIPE_A:
+                       status = DE_PIPEA_VBLANK_IVB;
+                       break;
+               case PIPE_B:
+                       status = DE_PIPEB_VBLANK_IVB;
+                       break;
+               case PIPE_C:
+                       status = DE_PIPEC_VBLANK_IVB;
+                       break;
+               }
+
+               return I915_READ(DEISR) & status;
+       }
+}
+
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                             int *vpos, int *hpos)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 vbl = 0, position = 0;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+       int position;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
-       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-                                                                     pipe);
 
-       if (!i915_pipe_enabled(dev, pipe)) {
+       if (!intel_crtc->active) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }
 
-       /* Get vtotal. */
-       vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+       htotal = mode->crtc_htotal;
+       vtotal = mode->crtc_vtotal;
+       vbl_start = mode->crtc_vblank_start;
+       vbl_end = mode->crtc_vblank_end;
 
-       if (INTEL_INFO(dev)->gen >= 4) {
+       ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+       if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
-               position = I915_READ(PIPEDSL(pipe));
+               if (IS_GEN2(dev))
+                       position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+               else
+                       position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
-               /* Decode into vertical scanout position. Don't have
-                * horizontal scanout position.
+               /*
+                * The scanline counter increments at the leading edge
+                * of hsync, ie. it completely misses the active portion
+                * of the line. Fix up the counter at both edges of vblank
+                * to get a more accurate picture whether we're in vblank
+                * or not.
                 */
-               *vpos = position & 0x1fff;
-               *hpos = 0;
+               in_vbl = intel_pipe_in_vblank(dev, pipe);
+               if ((in_vbl && position == vbl_start - 1) ||
+                   (!in_vbl && position == vbl_end - 1))
+                       position = (position + 1) % vtotal;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
@@ -505,28 +698,32 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
-               htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
-               *vpos = position / htotal;
-               *hpos = position - (*vpos * htotal);
+               /* convert to pixel counts */
+               vbl_start *= htotal;
+               vbl_end *= htotal;
+               vtotal *= htotal;
        }
 
-       /* Query vblank area. */
-       vbl = I915_READ(VBLANK(cpu_transcoder));
-
-       /* Test position against vblank region. */
-       vbl_start = vbl & 0x1fff;
-       vbl_end = (vbl >> 16) & 0x1fff;
+       in_vbl = position >= vbl_start && position < vbl_end;
 
-       if ((*vpos < vbl_start) || (*vpos > vbl_end))
-               in_vbl = false;
-
-       /* Inside "upper part" of vblank area? Apply corrective offset: */
-       if (in_vbl && (*vpos >= vbl_start))
-               *vpos = *vpos - vtotal;
+       /*
+        * While in vblank, position will be negative
+        * counting up towards 0 at vbl_end. And outside
+        * vblank, position will be positive counting
+        * up since vbl_end.
+        */
+       if (position >= vbl_start)
+               position -= vbl_end;
+       else
+               position += vtotal - vbl_end;
 
-       /* Readouts valid? */
-       if (vbl > 0)
-               ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+       if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+               *vpos = position;
+               *hpos = 0;
+       } else {
+               *vpos = position / htotal;
+               *hpos = position - (*vpos * htotal);
+       }
 
        /* In vblank? */
        if (in_vbl)
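
A worked example of the normalization above, with hypothetical timings
vbl_start = 1080 and vbl_end = vtotal = 1125: a scanline inside vblank maps
to a negative value that reaches 0 exactly at vbl_end, while an active
scanline counts up from vbl_end:

    /* position = 1100, inside vblank (>= vbl_start) */
    position -= vbl_end;              /* -> -25, hits 0 at vbl_end          */
    /* position = 50, inside the active area         */
    position += vtotal - vbl_end;     /* -> 50, counting up since vbl_end   */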
@@ -565,7 +762,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                                                     crtc);
 }
 
-static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+static bool intel_hpd_irq_event(struct drm_device *dev,
+                               struct drm_connector *connector)
 {
        enum drm_connector_status old_status;
 
@@ -573,11 +771,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con
        old_status = connector->status;
 
        connector->status = connector->funcs->detect(connector, false);
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+       if (old_status == connector->status)
+               return false;
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                      connector->base.id,
                      drm_get_connector_name(connector),
-                     old_status, connector->status);
-       return (old_status != connector->status);
+                     drm_get_connector_status_name(old_status),
+                     drm_get_connector_status_name(connector->status));
+
+       return true;
 }
 
 /*
@@ -701,7 +904,7 @@ static void notify_ring(struct drm_device *dev,
        if (ring->obj == NULL)
                return;
 
-       trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
+       trace_i915_gem_request_complete(ring);
 
        wake_up_all(&ring->irq_queue);
        i915_queue_hangcheck(dev);
@@ -711,56 +914,67 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps.work);
-       u32 pm_iir, pm_imr;
-       u8 new_delay;
+       u32 pm_iir;
+       int new_delay, adj;
 
        spin_lock_irq(&dev_priv->irq_lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
-       pm_imr = I915_READ(GEN6_PMIMR);
        /* Make sure not to corrupt PMIMR state used by ringbuffer code */
-       I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
+       snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
        spin_unlock_irq(&dev_priv->irq_lock);
 
+       /* Make sure we didn't queue anything we're not going to process. */
+       WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
+
        if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
                return;
 
        mutex_lock(&dev_priv->rps.hw_lock);
 
+       adj = dev_priv->rps.last_adj;
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-               new_delay = dev_priv->rps.cur_delay + 1;
+               if (adj > 0)
+                       adj *= 2;
+               else
+                       adj = 1;
+               new_delay = dev_priv->rps.cur_delay + adj;
 
                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
-               if (IS_VALLEYVIEW(dev_priv->dev) &&
-                   dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+               if (new_delay < dev_priv->rps.rpe_delay)
+                       new_delay = dev_priv->rps.rpe_delay;
+       } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+               if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
                        new_delay = dev_priv->rps.rpe_delay;
-       } else
-               new_delay = dev_priv->rps.cur_delay - 1;
+               else
+                       new_delay = dev_priv->rps.min_delay;
+               adj = 0;
+       } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+               if (adj < 0)
+                       adj *= 2;
+               else
+                       adj = -1;
+               new_delay = dev_priv->rps.cur_delay + adj;
+       } else { /* unknown event */
+               new_delay = dev_priv->rps.cur_delay;
+       }
 
        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
-       if (new_delay >= dev_priv->rps.min_delay &&
-           new_delay <= dev_priv->rps.max_delay) {
-               if (IS_VALLEYVIEW(dev_priv->dev))
-                       valleyview_set_rps(dev_priv->dev, new_delay);
-               else
-                       gen6_set_rps(dev_priv->dev, new_delay);
-       }
-
-       if (IS_VALLEYVIEW(dev_priv->dev)) {
-               /*
-                * On VLV, when we enter RC6 we may not be at the minimum
-                * voltage level, so arm a timer to check.  It should only
-                * fire when there's activity or once after we've entered
-                * RC6, and then won't be re-armed until the next RPS interrupt.
-                */
-               mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
-                                msecs_to_jiffies(100));
-       }
+       if (new_delay < (int)dev_priv->rps.min_delay)
+               new_delay = dev_priv->rps.min_delay;
+       if (new_delay > (int)dev_priv->rps.max_delay)
+               new_delay = dev_priv->rps.max_delay;
+       dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+
+       if (IS_VALLEYVIEW(dev_priv->dev))
+               valleyview_set_rps(dev_priv->dev, new_delay);
+       else
+               gen6_set_rps(dev_priv->dev, new_delay);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
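
The new adj bookkeeping gives RPS an exponentially growing step: consecutive
up-threshold events double a positive step, consecutive down-threshold events
double a negative one, and a down-timeout snaps to RPe (or min) and resets
the step. With repeated GEN6_PM_RP_UP_THRESHOLD events starting from
cur_delay = 10 and last_adj = 0, the sequence is:

    adj = 1;  new_delay = 10 + 1;    /* 11                               */
    adj = 2;  new_delay = 11 + 2;    /* 13                               */
    adj = 4;  new_delay = 13 + 4;    /* 17, eventually clamped to        */
                                     /* dev_priv->rps.max_delay          */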
@@ -780,9 +994,10 @@ static void ivybridge_parity_work(struct work_struct *work)
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    l3_parity.error_work);
        u32 error_status, row, bank, subbank;
-       char *parity_event[5];
+       char *parity_event[6];
        uint32_t misccpctl;
        unsigned long flags;
+       uint8_t slice = 0;
 
        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
@@ -790,60 +1005,95 @@ static void ivybridge_parity_work(struct work_struct *work)
         */
        mutex_lock(&dev_priv->dev->struct_mutex);
 
+       /* If we've screwed up tracking, just let the interrupt fire again */
+       if (WARN_ON(!dev_priv->l3_parity.which_slice))
+               goto out;
+
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);
 
-       error_status = I915_READ(GEN7_L3CDERRST1);
-       row = GEN7_PARITY_ERROR_ROW(error_status);
-       bank = GEN7_PARITY_ERROR_BANK(error_status);
-       subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+       while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+               u32 reg;
 
-       I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
-                                   GEN7_L3CDERRST1_ENABLE);
-       POSTING_READ(GEN7_L3CDERRST1);
+               slice--;
+               if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+                       break;
 
-       I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+               dev_priv->l3_parity.which_slice &= ~(1<<slice);
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+               reg = GEN7_L3CDERRST1 + (slice * 0x200);
 
-       mutex_unlock(&dev_priv->dev->struct_mutex);
+               error_status = I915_READ(reg);
+               row = GEN7_PARITY_ERROR_ROW(error_status);
+               bank = GEN7_PARITY_ERROR_BANK(error_status);
+               subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+               I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+               POSTING_READ(reg);
+
+               parity_event[0] = I915_L3_PARITY_UEVENT "=1";
+               parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+               parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+               parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+               parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
+               parity_event[5] = NULL;
+
+               kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
+                                  KOBJ_CHANGE, parity_event);
 
-       parity_event[0] = "L3_PARITY_ERROR=1";
-       parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
-       parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
-       parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
-       parity_event[4] = NULL;
+               DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+                         slice, row, bank, subbank);
 
-       kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
-                          KOBJ_CHANGE, parity_event);
+               kfree(parity_event[4]);
+               kfree(parity_event[3]);
+               kfree(parity_event[2]);
+               kfree(parity_event[1]);
+       }
 
-       DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
-                 row, bank, subbank);
+       I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
-       kfree(parity_event[3]);
-       kfree(parity_event[2]);
-       kfree(parity_event[1]);
+out:
+       WARN_ON(dev_priv->l3_parity.which_slice);
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+       mutex_unlock(&dev_priv->dev->struct_mutex);
 }
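
which_slice is drained as a bitmask via ffs(), which reports the lowest set
bit 1-based; the loop pattern in isolation (hypothetical mask value):

    u32 pending = 0x5;                    /* slices 0 and 2 flagged       */
    int slice;

    while ((slice = ffs(pending)) != 0) {
            slice--;                      /* ffs() is 1-based             */
            pending &= ~(1 << slice);
            /* read GEN7_L3CDERRST1 + slice * 0x200, emit one uevent */
    }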
 
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-       if (!HAS_L3_GPU_CACHE(dev))
+       if (!HAS_L3_DPF(dev))
                return;
 
        spin_lock(&dev_priv->irq_lock);
-       dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+       ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
        spin_unlock(&dev_priv->irq_lock);
 
+       iir &= GT_PARITY_ERROR(dev);
+       if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+               dev_priv->l3_parity.which_slice |= 1 << 1;
+
+       if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+               dev_priv->l3_parity.which_slice |= 1 << 0;
+
        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
+static void ilk_gt_irq_handler(struct drm_device *dev,
+                              struct drm_i915_private *dev_priv,
+                              u32 gt_iir)
+{
+       if (gt_iir &
+           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+               notify_ring(dev, &dev_priv->ring[RCS]);
+       if (gt_iir & ILK_BSD_USER_INTERRUPT)
+               notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
 static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
@@ -864,31 +1114,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
                i915_handle_error(dev, false);
        }
 
-       if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-               ivybridge_parity_error_irq_handler(dev);
-}
-
-/* Legacy way of handling PM interrupts */
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
-                                u32 pm_iir)
-{
-       /*
-        * IIR bits should never already be set because IMR should
-        * prevent an interrupt from being shown in IIR. The warning
-        * displays a case where we've unsafely cleared
-        * dev_priv->rps.pm_iir. Although missing an interrupt of the same
-        * type is not a problem, it displays a problem in the logic.
-        *
-        * The mask bit in IMR is cleared by dev_priv->rps.work.
-        */
-
-       spin_lock(&dev_priv->irq_lock);
-       dev_priv->rps.pm_iir |= pm_iir;
-       I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
-       POSTING_READ(GEN6_PMIMR);
-       spin_unlock(&dev_priv->irq_lock);
-
-       queue_work(dev_priv->wq, &dev_priv->rps.work);
+       if (gt_iir & GT_PARITY_ERROR(dev))
+               ivybridge_parity_error_irq_handler(dev, gt_iir);
 }
 
 #define HPD_STORM_DETECT_PERIOD 1000
@@ -908,6 +1135,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
 
+               WARN(((hpd[i] & hotplug_trigger) &&
+                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
+                    "Received HPD interrupt although disabled\n");
+
                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
                        continue;
@@ -918,6 +1149,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
                                   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
                        dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
                        dev_priv->hpd_stats[i].hpd_cnt = 0;
+                       DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
                } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
                        dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
                        dev_priv->hpd_event_bits &= ~(1 << i);
@@ -925,6 +1157,8 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
                        storm_detected = true;
                } else {
                        dev_priv->hpd_stats[i].hpd_cnt++;
+                       DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
+                                     dev_priv->hpd_stats[i].hpd_cnt);
                }
        }
 
@@ -932,8 +1166,13 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock(&dev_priv->irq_lock);
 
-       queue_work(dev_priv->wq,
-                  &dev_priv->hotplug_work);
+       /*
+        * Our hotplug handler can grab modeset locks (by calling down into the
+        * fb helpers). Hence it must not be run on our own dev_priv->wq work
+        * queue for otherwise the flush_work in the pageflip code will
+        * deadlock.
+        */
+       schedule_work(&dev_priv->hotplug_work);
 }
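
The storm detector is a plain rate limiter per HPD pin: a quiet period longer
than HPD_STORM_DETECT_PERIOD (1000 ms) resets the count, and a count beyond
HPD_STORM_THRESHOLD cuts the pin off until re-enabled. Condensed (sketch of
the policy shown above):

    if (time_after(jiffies, last_jiffies + msecs_to_jiffies(1000))) {
            last_jiffies = jiffies;
            cnt = 0;                        /* quiet long enough, restart  */
    } else if (cnt > HPD_STORM_THRESHOLD) {
            mark = HPD_MARK_DISABLED;       /* storm: stop listening       */
    } else {
            cnt++;
    }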
 
 static void gmbus_irq_handler(struct drm_device *dev)
@@ -950,31 +1189,64 @@ static void dp_aux_irq_handler(struct drm_device *dev)
        wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
-/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
- * we must be able to deal with other PM interrupts. This is complicated because
- * of the way in which we use the masks to defer the RPS work (which for
- * posterity is necessary because of forcewake).
- */
-static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
-                              u32 pm_iir)
+#if defined(CONFIG_DEBUG_FS)
+static void ivb_pipe_crc_update(struct drm_device *dev, enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+       struct intel_pipe_crc_entry *entry;
+       ktime_t now;
+       int ts, head, tail;
+
+       head = atomic_read(&pipe_crc->head);
+       tail = atomic_read(&pipe_crc->tail);
+
+       if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+               DRM_ERROR("CRC buffer overflowing\n");
+               return;
+       }
+
+       entry = &pipe_crc->entries[head];
+
+       now = ktime_get();
+       ts = ktime_to_us(now);
+
+       entry->timestamp = ts;
+       entry->crc[0] = I915_READ(PIPE_CRC_RES_1_IVB(pipe));
+       entry->crc[1] = I915_READ(PIPE_CRC_RES_2_IVB(pipe));
+       entry->crc[2] = I915_READ(PIPE_CRC_RES_3_IVB(pipe));
+       entry->crc[3] = I915_READ(PIPE_CRC_RES_4_IVB(pipe));
+       entry->crc[4] = I915_READ(PIPE_CRC_RES_5_IVB(pipe));
+
+       head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+       atomic_set(&pipe_crc->head, head);
+}
+#else
+static void ivb_pipe_crc_update(struct drm_device *dev, enum pipe pipe) {}
+#endif
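
The CRC ring is a classic single-producer/single-consumer circular buffer:
this interrupt path only advances head, the debugfs reader only advances
tail, and the <linux/circ_buf.h> macros compute space and occupancy without
locking. A minimal sketch of the consumer side, assuming the same
power-of-two INTEL_PIPE_CRC_ENTRIES_NR sizing (the reader itself is not part
of this hunk):

    while (CIRC_CNT(atomic_read(&pipe_crc->head),
                    atomic_read(&pipe_crc->tail),
                    INTEL_PIPE_CRC_ENTRIES_NR) > 0) {
            int tail = atomic_read(&pipe_crc->tail);
            struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];

            /* consume entry->timestamp and entry->crc[0..4] here */

            tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
            atomic_set(&pipe_crc->tail, tail);
    }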
+
+/* The RPS events need forcewake, so we add them to a work queue and mask their
+ * IMR bits until the work is done. Other interrupts can be processed without
+ * the work queue. */
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
        if (pm_iir & GEN6_PM_RPS_EVENTS) {
                spin_lock(&dev_priv->irq_lock);
                dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
-               I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
-               /* never want to mask useful interrupts. (also posting read) */
-               WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+               snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
                spin_unlock(&dev_priv->irq_lock);
 
                queue_work(dev_priv->wq, &dev_priv->rps.work);
        }
 
-       if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-               notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+       if (HAS_VEBOX(dev_priv->dev)) {
+               if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+                       notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
-       if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
-               DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
-               i915_handle_error(dev_priv->dev, false);
+               if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
+                       DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
+                       i915_handle_error(dev_priv->dev, false);
+               }
        }
 }
 
@@ -1046,7 +1318,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);
 
-               if (pm_iir & GEN6_PM_RPS_EVENTS)
+               if (pm_iir)
                        gen6_rps_irq_handler(dev_priv, pm_iir);
 
                I915_WRITE(GTIIR, gt_iir);
@@ -1131,6 +1403,15 @@ static void ivb_err_int_handler(struct drm_device *dev)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
                        DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
 
+       if (err_int & ERR_INT_PIPE_CRC_DONE_A)
+               ivb_pipe_crc_update(dev, PIPE_A);
+
+       if (err_int & ERR_INT_PIPE_CRC_DONE_B)
+               ivb_pipe_crc_update(dev, PIPE_B);
+
+       if (err_int & ERR_INT_PIPE_CRC_DONE_C)
+               ivb_pipe_crc_update(dev, PIPE_C);
+
        I915_WRITE(GEN7_ERR_INT, err_int);
 }
 
@@ -1197,163 +1478,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
                cpt_serr_int_handler(dev);
 }
 
-static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
-{
-       struct drm_device *dev = (struct drm_device *) arg;
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
-       irqreturn_t ret = IRQ_NONE;
-       int i;
-
-       atomic_inc(&dev_priv->irq_received);
-
-       /* We get interrupts on unclaimed registers, so check for this before we
-        * do any I915_{READ,WRITE}. */
-       if (IS_HASWELL(dev) &&
-           (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-               DRM_ERROR("Unclaimed register before interrupt\n");
-               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-       }
-
-       /* disable master interrupt before clearing iir  */
-       de_ier = I915_READ(DEIER);
-       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-
-       /* Disable south interrupts. We'll only write to SDEIIR once, so further
-        * interrupts will will be stored on its back queue, and then we'll be
-        * able to process them after we restore SDEIER (as soon as we restore
-        * it, we'll get an interrupt if SDEIIR still has something to process
-        * due to its back queue). */
-       if (!HAS_PCH_NOP(dev)) {
-               sde_ier = I915_READ(SDEIER);
-               I915_WRITE(SDEIER, 0);
-               POSTING_READ(SDEIER);
-       }
-
-       /* On Haswell, also mask ERR_INT because we don't want to risk
-        * generating "unclaimed register" interrupts from inside the interrupt
-        * handler. */
-       if (IS_HASWELL(dev)) {
-               spin_lock(&dev_priv->irq_lock);
-               ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-               spin_unlock(&dev_priv->irq_lock);
-       }
-
-       gt_iir = I915_READ(GTIIR);
-       if (gt_iir) {
-               snb_gt_irq_handler(dev, dev_priv, gt_iir);
-               I915_WRITE(GTIIR, gt_iir);
-               ret = IRQ_HANDLED;
-       }
-
-       de_iir = I915_READ(DEIIR);
-       if (de_iir) {
-               if (de_iir & DE_ERR_INT_IVB)
-                       ivb_err_int_handler(dev);
-
-               if (de_iir & DE_AUX_CHANNEL_A_IVB)
-                       dp_aux_irq_handler(dev);
-
-               if (de_iir & DE_GSE_IVB)
-                       intel_opregion_asle_intr(dev);
-
-               for (i = 0; i < 3; i++) {
-                       if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
-                               drm_handle_vblank(dev, i);
-                       if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
-                               intel_prepare_page_flip(dev, i);
-                               intel_finish_page_flip_plane(dev, i);
-                       }
-               }
-
-               /* check event from PCH */
-               if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
-                       u32 pch_iir = I915_READ(SDEIIR);
-
-                       cpt_irq_handler(dev, pch_iir);
-
-                       /* clear PCH hotplug event before clear CPU irq */
-                       I915_WRITE(SDEIIR, pch_iir);
-               }
-
-               I915_WRITE(DEIIR, de_iir);
-               ret = IRQ_HANDLED;
-       }
-
-       pm_iir = I915_READ(GEN6_PMIIR);
-       if (pm_iir) {
-               if (IS_HASWELL(dev))
-                       hsw_pm_irq_handler(dev_priv, pm_iir);
-               else if (pm_iir & GEN6_PM_RPS_EVENTS)
-                       gen6_rps_irq_handler(dev_priv, pm_iir);
-               I915_WRITE(GEN6_PMIIR, pm_iir);
-               ret = IRQ_HANDLED;
-       }
-
-       if (IS_HASWELL(dev)) {
-               spin_lock(&dev_priv->irq_lock);
-               if (ivb_can_enable_err_int(dev))
-                       ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-               spin_unlock(&dev_priv->irq_lock);
-       }
-
-       I915_WRITE(DEIER, de_ier);
-       POSTING_READ(DEIER);
-       if (!HAS_PCH_NOP(dev)) {
-               I915_WRITE(SDEIER, sde_ier);
-               POSTING_READ(SDEIER);
-       }
-
-       return ret;
-}
-
-static void ilk_gt_irq_handler(struct drm_device *dev,
-                              struct drm_i915_private *dev_priv,
-                              u32 gt_iir)
+static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
-       if (gt_iir &
-           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-               notify_ring(dev, &dev_priv->ring[RCS]);
-       if (gt_iir & ILK_BSD_USER_INTERRUPT)
-               notify_ring(dev, &dev_priv->ring[VCS]);
-}
-
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
-{
-       struct drm_device *dev = (struct drm_device *) arg;
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       int ret = IRQ_NONE;
-       u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
-
-       atomic_inc(&dev_priv->irq_received);
-
-       /* disable master interrupt before clearing iir  */
-       de_ier = I915_READ(DEIER);
-       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-       POSTING_READ(DEIER);
-
-       /* Disable south interrupts. We'll only write to SDEIIR once, so further
-        * interrupts will will be stored on its back queue, and then we'll be
-        * able to process them after we restore SDEIER (as soon as we restore
-        * it, we'll get an interrupt if SDEIIR still has something to process
-        * due to its back queue). */
-       sde_ier = I915_READ(SDEIER);
-       I915_WRITE(SDEIER, 0);
-       POSTING_READ(SDEIER);
-
-       de_iir = I915_READ(DEIIR);
-       gt_iir = I915_READ(GTIIR);
-       pm_iir = I915_READ(GEN6_PMIIR);
-
-       if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
-               goto done;
-
-       ret = IRQ_HANDLED;
-
-       if (IS_GEN5(dev))
-               ilk_gt_irq_handler(dev, dev_priv, gt_iir);
-       else
-               snb_gt_irq_handler(dev, dev_priv, gt_iir);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (de_iir & DE_AUX_CHANNEL_A)
                dp_aux_irq_handler(dev);
@@ -1401,25 +1528,140 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
                I915_WRITE(SDEIIR, pch_iir);
        }
 
-       if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
+       if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
                ironlake_rps_change_irq_handler(dev);
+}
 
-       if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
-               gen6_rps_irq_handler(dev_priv, pm_iir);
+static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
 
-       I915_WRITE(GTIIR, gt_iir);
-       I915_WRITE(DEIIR, de_iir);
-       I915_WRITE(GEN6_PMIIR, pm_iir);
+       if (de_iir & DE_ERR_INT_IVB)
+               ivb_err_int_handler(dev);
+
+       if (de_iir & DE_AUX_CHANNEL_A_IVB)
+               dp_aux_irq_handler(dev);
+
+       if (de_iir & DE_GSE_IVB)
+               intel_opregion_asle_intr(dev);
+
+       for (i = 0; i < 3; i++) {
+               if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+                       drm_handle_vblank(dev, i);
+               if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+                       intel_prepare_page_flip(dev, i);
+                       intel_finish_page_flip_plane(dev, i);
+               }
+       }
+
+       /* check event from PCH */
+       if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+               u32 pch_iir = I915_READ(SDEIIR);
+
+               cpt_irq_handler(dev, pch_iir);
+
+               /* clear PCH hotplug event before clear CPU irq */
+               I915_WRITE(SDEIIR, pch_iir);
+       }
+}
+
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+{
+       struct drm_device *dev = (struct drm_device *) arg;
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 de_iir, gt_iir, de_ier, sde_ier = 0;
+       irqreturn_t ret = IRQ_NONE;
+
+       atomic_inc(&dev_priv->irq_received);
+
+       /* We get interrupts on unclaimed registers, so check for this before we
+        * do any I915_{READ,WRITE}. */
+       intel_uncore_check_errors(dev);
+
+       /* disable master interrupt before clearing iir  */
+       de_ier = I915_READ(DEIER);
+       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+       POSTING_READ(DEIER);
+
+       /* Disable south interrupts. We'll only write to SDEIIR once, so further
+        * interrupts will be stored on its back queue, and then we'll be
+        * able to process them after we restore SDEIER (as soon as we restore
+        * it, we'll get an interrupt if SDEIIR still has something to process
+        * due to its back queue). */
+       if (!HAS_PCH_NOP(dev)) {
+               sde_ier = I915_READ(SDEIER);
+               I915_WRITE(SDEIER, 0);
+               POSTING_READ(SDEIER);
+       }
+
+       gt_iir = I915_READ(GTIIR);
+       if (gt_iir) {
+               if (INTEL_INFO(dev)->gen >= 6)
+                       snb_gt_irq_handler(dev, dev_priv, gt_iir);
+               else
+                       ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+               I915_WRITE(GTIIR, gt_iir);
+               ret = IRQ_HANDLED;
+       }
+
+       de_iir = I915_READ(DEIIR);
+       if (de_iir) {
+               if (INTEL_INFO(dev)->gen >= 7)
+                       ivb_display_irq_handler(dev, de_iir);
+               else
+                       ilk_display_irq_handler(dev, de_iir);
+               I915_WRITE(DEIIR, de_iir);
+               ret = IRQ_HANDLED;
+       }
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               u32 pm_iir = I915_READ(GEN6_PMIIR);
+               if (pm_iir) {
+                       gen6_rps_irq_handler(dev_priv, pm_iir);
+                       I915_WRITE(GEN6_PMIIR, pm_iir);
+                       ret = IRQ_HANDLED;
+               }
+       }
 
-done:
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
-       I915_WRITE(SDEIER, sde_ier);
-       POSTING_READ(SDEIER);
+       if (!HAS_PCH_NOP(dev)) {
+               I915_WRITE(SDEIER, sde_ier);
+               POSTING_READ(SDEIER);
+       }
 
        return ret;
 }
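
The unified handler keeps the usual level-triggered dance: gate the master
enable, snapshot and ack each IIR, then restore the enables so that anything
still pending in SDEIIR immediately re-raises the interrupt. Stripped to its
skeleton:

    de_ier = I915_READ(DEIER);
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); /* gate everything */

    iir = I915_READ(DEIIR);
    if (iir) {
            /* ... dispatch ... */
            I915_WRITE(DEIIR, iir);                     /* ack what we saw */
    }

    I915_WRITE(DEIER, de_ier);                          /* re-arm; pending */
    POSTING_READ(DEIER);                                /* bits re-trigger */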
 
+static void i915_error_wake_up(struct drm_i915_private *dev_priv,
+                              bool reset_completed)
+{
+       struct intel_ring_buffer *ring;
+       int i;
+
+       /*
+        * Notify all waiters for GPU completion events that reset state has
+        * been changed, and that they need to restart their wait after
+        * checking for potential errors (and bail out to drop locks if there is
+        * a gpu reset pending so that i915_error_work_func can acquire them).
+        */
+
+       /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
+       for_each_ring(ring, dev_priv, i)
+               wake_up_all(&ring->irq_queue);
+
+       /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
+       wake_up_all(&dev_priv->pending_flip_queue);
+
+       /*
+        * Signal tasks blocked in i915_gem_wait_for_error that the pending
+        * reset state is cleared.
+        */
+       if (reset_completed)
+               wake_up_all(&dev_priv->gpu_error.reset_queue);
+}
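
i915_error_wake_up() only works together with the waiter-side protocol: each
woken waiter re-checks the reset counter and, if a reset is pending, drops
its locks and backs out so that i915_error_work_func can take them. Roughly,
on the waiter side (a sketch, not code from this patch; assumes the existing
i915_reset_in_progress() helper):

    ret = wait_event_interruptible(ring->irq_queue, condition);
    if (i915_reset_in_progress(&dev_priv->gpu_error)) {
            /* drop struct_mutex / crtc locks and return -EAGAIN so the
             * reset worker can acquire them */
            return -EAGAIN;
    }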
+
 /**
  * i915_error_work_func - do process context error handling work
  * @work: work struct
@@ -1434,11 +1676,10 @@ static void i915_error_work_func(struct work_struct *work)
        drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
                                                    gpu_error);
        struct drm_device *dev = dev_priv->dev;
-       struct intel_ring_buffer *ring;
-       char *error_event[] = { "ERROR=1", NULL };
-       char *reset_event[] = { "RESET=1", NULL };
-       char *reset_done_event[] = { "ERROR=0", NULL };
-       int i, ret;
+       char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+       char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+       char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+       int ret;
 
        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
@@ -1457,8 +1698,16 @@ static void i915_error_work_func(struct work_struct *work)
                kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
                                   reset_event);
 
+               /*
+                * All state reset _must_ be completed before we update the
+                * reset counter, for otherwise waiters might miss the reset
+                * pending state and not properly drop locks, resulting in
+                * deadlocks with the reset work.
+                */
                ret = i915_reset(dev);
 
+               intel_display_handle_reset(dev);
+
                if (ret == 0) {
                        /*
                         * After all the gem state is reset, increment the reset
@@ -1479,12 +1728,11 @@ static void i915_error_work_func(struct work_struct *work)
                        atomic_set(&error->reset_counter, I915_WEDGED);
                }
 
-               for_each_ring(ring, dev_priv, i)
-                       wake_up_all(&ring->irq_queue);
-
-               intel_display_handle_reset(dev);
-
-               wake_up_all(&dev_priv->gpu_error.reset_queue);
+               /*
+                * Note: The wake_up also serves as a memory barrier so that
+                * waiters see the updated value of the reset counter atomic_t.
+                */
+               i915_error_wake_up(dev_priv, true);
        }
 }
 
@@ -1593,8 +1841,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 void i915_handle_error(struct drm_device *dev, bool wedged)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
-       int i;
 
        i915_capture_error_state(dev);
        i915_report_and_clear_eir(dev);
@@ -1604,14 +1850,28 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
                                &dev_priv->gpu_error.reset_counter);
 
                /*
-                * Wakeup waiting processes so that the reset work item
-                * doesn't deadlock trying to grab various locks.
+                * Wakeup waiting processes so that the reset work function
+                * i915_error_work_func doesn't deadlock trying to grab various
+                * locks. By bumping the reset counter first, the woken
+                * processes will see a reset in progress and back off,
+                * releasing their locks and then wait for the reset completion.
+                * We must do this for _all_ gpu waiters that might hold locks
+                * that the reset work needs to acquire.
+                *
+                * Note: The wake_up serves as the required memory barrier to
+                * ensure that the waiters see the updated value of the reset
+                * counter atomic_t.
                 */
-               for_each_ring(ring, dev_priv, i)
-                       wake_up_all(&ring->irq_queue);
+               i915_error_wake_up(dev_priv, false);
        }
 
-       queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
+       /*
+        * Our reset work can grab modeset locks (since it needs to reset the
+        * state of outstanding pageflips). Hence it must not be run on our own
+        * dev_priv->wq work queue for otherwise the flush_work in the pageflip
+        * code will deadlock.
+        */
+       schedule_work(&dev_priv->gpu_error.work);
 }
 
 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -1691,29 +1951,14 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
+       uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+                                                    DE_PIPE_VBLANK_ILK(pipe);
 
        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
-                                   DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
-       return 0;
-}
-
-static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
-
-       if (!i915_pipe_enabled(dev, pipe))
-               return -EINVAL;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_enable_display_irq(dev_priv,
-                                   DE_PIPEA_VBLANK_IVB << (5 * pipe));
+       ironlake_enable_display_irq(dev_priv, bit);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        return 0;
@@ -1764,21 +2009,11 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
+       uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+                                                    DE_PIPE_VBLANK_ILK(pipe);
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
-                                    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-}
-
-static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_disable_display_irq(dev_priv,
-                                    DE_PIPEA_VBLANK_IVB << (pipe * 5));
+       ironlake_disable_display_irq(dev_priv, bit);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
@@ -1881,10 +2116,10 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
        u32 tmp;
 
        if (ring->hangcheck.acthd != acthd)
-               return active;
+               return HANGCHECK_ACTIVE;
 
        if (IS_GEN2(dev))
-               return hung;
+               return HANGCHECK_HUNG;
 
        /* Is the chip hanging on a WAIT_FOR_EVENT?
         * If so we can simply poke the RB_WAIT bit
@@ -1895,25 +2130,27 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
        if (tmp & RING_WAIT) {
                DRM_ERROR("Kicking stuck wait on %s\n",
                          ring->name);
+               i915_handle_error(dev, false);
                I915_WRITE_CTL(ring, tmp);
-               return kick;
+               return HANGCHECK_KICK;
        }
 
        if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
                switch (semaphore_passed(ring)) {
                default:
-                       return hung;
+                       return HANGCHECK_HUNG;
                case 1:
                        DRM_ERROR("Kicking stuck semaphore on %s\n",
                                  ring->name);
+                       i915_handle_error(dev, false);
                        I915_WRITE_CTL(ring, tmp);
-                       return kick;
+                       return HANGCHECK_KICK;
                case 0:
-                       return wait;
+                       return HANGCHECK_WAIT;
                }
        }
 
-       return hung;
+       return HANGCHECK_HUNG;
 }
 
 /**
@@ -1924,7 +2161,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
  * we kick the ring. If we see no progress on three subsequent calls
  * we assume chip is wedged and try to fix it by resetting the chip.
  */
-void i915_hangcheck_elapsed(unsigned long data)
+static void i915_hangcheck_elapsed(unsigned long data)
 {
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1951,17 +2188,20 @@ void i915_hangcheck_elapsed(unsigned long data)
 
                if (ring->hangcheck.seqno == seqno) {
                        if (ring_idle(ring, seqno)) {
+                               ring->hangcheck.action = HANGCHECK_IDLE;
+
                                if (waitqueue_active(&ring->irq_queue)) {
                                        /* Issue a wake-up to catch stuck h/w. */
-                                       DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-                                                 ring->name);
-                                       wake_up_all(&ring->irq_queue);
-                                       ring->hangcheck.score += HUNG;
+                                       if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
+                                               DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+                                                         ring->name);
+                                               wake_up_all(&ring->irq_queue);
+                                       }
+                                       /* Safeguard against driver failure */
+                                       ring->hangcheck.score += BUSY;
                                } else
                                        busy = false;
                        } else {
-                               int score;
-
                                /* We always increment the hangcheck score
                                 * if the ring is busy and still processing
                                 * the same request, so that no single request
@@ -1981,23 +2221,24 @@ void i915_hangcheck_elapsed(unsigned long data)
                                                                    acthd);
 
                                switch (ring->hangcheck.action) {
-                               case wait:
-                                       score = 0;
+                               case HANGCHECK_IDLE:
+                               case HANGCHECK_WAIT:
                                        break;
-                               case active:
-                                       score = BUSY;
+                               case HANGCHECK_ACTIVE:
+                                       ring->hangcheck.score += BUSY;
                                        break;
-                               case kick:
-                                       score = KICK;
+                               case HANGCHECK_KICK:
+                                       ring->hangcheck.score += KICK;
                                        break;
-                               case hung:
-                                       score = HUNG;
+                               case HANGCHECK_HUNG:
+                                       ring->hangcheck.score += HUNG;
                                        stuck[i] = true;
                                        break;
                                }
-                               ring->hangcheck.score += score;
                        }
                } else {
+                       ring->hangcheck.action = HANGCHECK_ACTIVE;
+
                        /* Gradually reduce the count so that we catch DoS
                         * attempts across multiple batches.
                         */
@@ -2012,9 +2253,9 @@ void i915_hangcheck_elapsed(unsigned long data)
 
        for_each_ring(ring, dev_priv, i) {
                if (ring->hangcheck.score > FIRE) {
-                       DRM_ERROR("%s on %s\n",
-                                 stuck[i] ? "stuck" : "no progress",
-                                 ring->name);
+                       DRM_INFO("%s on %s\n",
+                                stuck[i] ? "stuck" : "no progress",
+                                ring->name);
                        rings_hung++;
                }
        }
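
A minimal user-space sketch of the scoring scheme above; the weights and the
FIRE threshold here are illustrative stand-ins, not the driver's actual
values. Each hangcheck tick classifies a busy ring and accumulates a per-ring
score until it crosses the firing threshold, so a slow-but-live workload's
score decays away while a truly wedged ring fires within a couple of ticks.

    #include <stdbool.h>
    #include <stdio.h>

    enum hangcheck_action {
            HANGCHECK_IDLE, HANGCHECK_WAIT, HANGCHECK_ACTIVE,
            HANGCHECK_KICK, HANGCHECK_HUNG
    };

    #define BUSY 1   /* still making progress, just slowly */
    #define KICK 5   /* needed a kick to move on */
    #define HUNG 20  /* no forward progress at all */
    #define FIRE 30  /* assumed firing threshold */

    static bool hangcheck_tick(enum hangcheck_action action, int *score)
    {
            switch (action) {
            case HANGCHECK_IDLE:
            case HANGCHECK_WAIT:
                    break;                  /* waiting costs nothing */
            case HANGCHECK_ACTIVE:
                    *score += BUSY;
                    break;
            case HANGCHECK_KICK:
                    *score += KICK;
                    break;
            case HANGCHECK_HUNG:
                    *score += HUNG;
                    break;
            }
            return *score > FIRE;           /* caller declares the ring hung */
    }

    int main(void)
    {
            int score = 0;

            hangcheck_tick(HANGCHECK_HUNG, &score);         /* 20: not yet */
            printf("fired: %d\n", hangcheck_tick(HANGCHECK_HUNG, &score));
            return 0;
    }
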
@@ -2188,10 +2429,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
        pm_irqs = gt_irqs = 0;
 
        dev_priv->gt_irq_mask = ~0;
-       if (HAS_L3_GPU_CACHE(dev)) {
+       if (HAS_L3_DPF(dev)) {
                /* L3 parity interrupt is always unmasked. */
-               dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-               gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+               dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
+               gt_irqs |= GT_PARITY_ERROR(dev);
        }
 
        gt_irqs |= GT_RENDER_USER_INTERRUPT;
@@ -2213,8 +2454,9 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
                if (HAS_VEBOX(dev))
                        pm_irqs |= PM_VEBOX_USER_INTERRUPT;
 
+               dev_priv->pm_irq_mask = 0xffffffff;
                I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
-               I915_WRITE(GEN6_PMIMR, 0xffffffff);
+               I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
                I915_WRITE(GEN6_PMIER, pm_irqs);
                POSTING_READ(GEN6_PMIER);
        }
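
The PM-interrupt bring-up above follows the usual three-step order: ack any
stale identity bits, mask everything (the mask now also cached in
pm_irq_mask), then enable the wanted sources; the POSTING_READ flushes the
writes. A minimal sketch with plain variables standing in for the MMIO
registers:

    #include <stdint.h>

    struct irq_regs { uint32_t iir, imr, ier; };

    static void pm_irq_postinstall(struct irq_regs *r, uint32_t wanted)
    {
            /* In hardware, writing IIR's current value back acks those
             * bits; modelled here as clearing the identity register. */
            r->iir = 0;
            r->imr = 0xffffffff;    /* mask everything until someone unmasks */
            r->ier = wanted;        /* but leave the sources enabled in IER */
    }
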
@@ -2223,21 +2465,33 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 static int ironlake_irq_postinstall(struct drm_device *dev)
 {
        unsigned long irqflags;
-
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       /* enable kind of interrupts always enabled */
-       u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
-                          DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-                          DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
-                          DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
+       u32 display_mask, extra_mask;
+
+       if (INTEL_INFO(dev)->gen >= 7) {
+               display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+                               DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
+                               DE_PLANEB_FLIP_DONE_IVB |
+                               DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
+                               DE_ERR_INT_IVB);
+               extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
+                             DE_PIPEA_VBLANK_IVB);
+
+               I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
+       } else {
+               display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+                               DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+                               DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
+                               DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+               extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+       }
 
        dev_priv->irq_mask = ~display_mask;
 
        /* should always be able to generate an irq */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
-       I915_WRITE(DEIER, display_mask |
-                         DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
+       I915_WRITE(DEIER, display_mask | extra_mask);
        POSTING_READ(DEIER);
 
        gen5_gt_irq_postinstall(dev);
@@ -2258,38 +2512,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        return 0;
 }
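
A sketch of the display_mask/extra_mask split that lets one postinstall serve
both generations: display_mask sources are unmasked and enabled, while
extra_mask sources (the vblanks, plus PCU events on ILK/SNB) are enabled at
the IER level but left masked, so they can later be unmasked cheaply
per-CRTC. Variables stand in for DEIMR/DEIER:

    #include <stdint.h>

    struct de_regs { uint32_t deimr, deier; };

    static void de_irq_postinstall(struct de_regs *r,
                                   uint32_t display_mask, uint32_t extra_mask)
    {
            r->deimr = ~display_mask;               /* unmask always-on sources */
            r->deier = display_mask | extra_mask;   /* enable both sets */
    }
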
 
-static int ivybridge_irq_postinstall(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       /* enable kind of interrupts always enabled */
-       u32 display_mask =
-               DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
-               DE_PLANEC_FLIP_DONE_IVB |
-               DE_PLANEB_FLIP_DONE_IVB |
-               DE_PLANEA_FLIP_DONE_IVB |
-               DE_AUX_CHANNEL_A_IVB |
-               DE_ERR_INT_IVB;
-
-       dev_priv->irq_mask = ~display_mask;
-
-       /* should always can generate irq */
-       I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
-       I915_WRITE(DEIIR, I915_READ(DEIIR));
-       I915_WRITE(DEIMR, dev_priv->irq_mask);
-       I915_WRITE(DEIER,
-                  display_mask |
-                  DE_PIPEC_VBLANK_IVB |
-                  DE_PIPEB_VBLANK_IVB |
-                  DE_PIPEA_VBLANK_IVB);
-       POSTING_READ(DEIER);
-
-       gen5_gt_irq_postinstall(dev);
-
-       ibx_irq_postinstall(dev);
-
-       return 0;
-}
-
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2478,7 +2700,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
        u16 iir, new_iir;
        u32 pipe_stats[2];
        unsigned long irqflags;
-       int irq_received;
        int pipe;
        u16 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -2512,7 +2733,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
-                               irq_received = 1;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3077,18 +3297,21 @@ void intel_irq_init(struct drm_device *dev)
 
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
-       dev->driver->get_vblank_counter = i915_get_vblank_counter;
-       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+       if (IS_GEN2(dev)) {
+               dev->max_vblank_count = 0;
+               dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+       } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+       } else {
+               dev->driver->get_vblank_counter = i915_get_vblank_counter;
+               dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        }
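
A sketch of why max_vblank_count matters, assuming the DRM core of this era
computes frame deltas modulo the counter width: a masked subtraction stays
correct across a hardware wrap, but only if the mask matches the counter's
real width, and gen2 reports 0 to say it has no usable hardware counter at
all.

    #include <stdint.h>
    #include <stdio.h>

    /* Masking the subtraction keeps the delta correct across a wrap. */
    static uint32_t vblank_delta(uint32_t cur, uint32_t last,
                                 uint32_t max_vblank_count)
    {
            return (cur - last) & max_vblank_count;
    }

    int main(void)
    {
            /* 24-bit counter wrapping from 0xfffffe to 0x000002: delta 4. */
            printf("%u\n", vblank_delta(0x000002, 0xfffffe, 0xffffff));
            return 0;
    }
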
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
-       else
-               dev->driver->get_vblank_timestamp = NULL;
-       dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+               dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+       }
 
        if (IS_VALLEYVIEW(dev)) {
                dev->driver->irq_handler = valleyview_irq_handler;
@@ -3098,15 +3321,6 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-       } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-               /* Share uninstall handlers with ILK/SNB */
-               dev->driver->irq_handler = ivybridge_irq_handler;
-               dev->driver->irq_preinstall = ironlake_irq_preinstall;
-               dev->driver->irq_postinstall = ivybridge_irq_postinstall;
-               dev->driver->irq_uninstall = ironlake_irq_uninstall;
-               dev->driver->enable_vblank = ivybridge_enable_vblank;
-               dev->driver->disable_vblank = ivybridge_disable_vblank;
-               dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -3165,3 +3379,67 @@ void intel_hpd_init(struct drm_device *dev)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
+
+/* Disable interrupts so we can allow Package C8+. */
+void hsw_pc8_disable_interrupts(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+       dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
+       dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
+       dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
+       dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
+       dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
+
+       ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
+       ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
+       ilk_disable_gt_irq(dev_priv, 0xffffffff);
+       snb_disable_pm_irq(dev_priv, 0xffffffff);
+
+       dev_priv->pc8.irqs_disabled = true;
+
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
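
A user-space sketch of the save-then-mask pattern above, with illustrative
names rather than driver API: snapshot the live mask and then mask the
sources, all under one lock so no concurrent enable/disable can slip between
the two steps. (The real code deliberately leaves the PCH hotplug bits and
DE_PCH_EVENT_IVB unmasked so the machine can still be woken back out of PC8.)

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct irq_state {
            pthread_mutex_t lock;
            uint32_t imr;           /* stands in for a live mask register */
            uint32_t saved_imr;     /* snapshot for the later restore */
            int irqs_disabled;
    };

    static void pc8_style_disable(struct irq_state *s)
    {
            pthread_mutex_lock(&s->lock);
            s->saved_imr = s->imr;  /* remember what was unmasked */
            s->imr = 0xffffffff;    /* then mask every source */
            s->irqs_disabled = 1;
            pthread_mutex_unlock(&s->lock);
    }
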
+
+/* Restore interrupts so we can recover from Package C8+. */
+void hsw_pc8_restore_interrupts(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long irqflags;
+       uint32_t val, expected;
+
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+       val = I915_READ(DEIMR);
+       expected = ~DE_PCH_EVENT_IVB;
+       WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
+
+       val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
+       expected = ~SDE_HOTPLUG_MASK_CPT;
+       WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
+            val, expected);
+
+       val = I915_READ(GTIMR);
+       expected = 0xffffffff;
+       WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
+
+       val = I915_READ(GEN6_PMIMR);
+       expected = 0xffffffff;
+       WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
+            expected);
+
+       dev_priv->pc8.irqs_disabled = false;
+
+       ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
+       ibx_enable_display_interrupt(dev_priv,
+                                    ~dev_priv->pc8.regsave.sdeimr &
+                                    ~SDE_HOTPLUG_MASK_CPT);
+       ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
+       snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
+       I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
+
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
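
Continuing that sketch, the restore side mirrors the WARN checks above:
verify that nothing touched the register while interrupts were off, then
re-expose exactly the snapshot. Note the ordering, matching the code above:
irqs_disabled is cleared before the unmask step.

    /* Pairs with pc8_style_disable() from the previous sketch. */
    static void pc8_style_restore(struct irq_state *s)
    {
            pthread_mutex_lock(&s->lock);
            if (s->imr != 0xffffffff)       /* mirrors the WARNs above */
                    fprintf(stderr, "mask changed behind our back: 0x%08x\n",
                            s->imr);
            s->irqs_disabled = 0;           /* clear the flag first... */
            s->imr = s->saved_imr;          /* ...then re-expose the snapshot */
            pthread_mutex_unlock(&s->lock);
    }
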