drm/i915: Abstract the legacy workload submission mechanism away
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c50e3b41d6fef28d3f7e44e42fd2db7a5584089f..f0d24db76e72fdc2b8b12d57fea8dc83c5d87ec4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -136,7 +136,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
        assert_spin_locked(&dev_priv->irq_lock);
 
-       if (WARN_ON(dev_priv->pm.irqs_disabled))
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;
 
        if ((dev_priv->irq_mask & mask) != 0) {
@@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
        assert_spin_locked(&dev_priv->irq_lock);
 
-       if (WARN_ON(dev_priv->pm.irqs_disabled))
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;
 
        if ((dev_priv->irq_mask & mask) != mask) {
@@ -173,7 +173,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
 {
        assert_spin_locked(&dev_priv->irq_lock);
 
-       if (WARN_ON(dev_priv->pm.irqs_disabled))
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;
 
        dev_priv->gt_irq_mask &= ~interrupt_mask;
@@ -182,12 +182,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
        POSTING_READ(GTIMR);
 }
 
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
        ilk_update_gt_irq(dev_priv, mask, mask);
 }
 
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
        ilk_update_gt_irq(dev_priv, mask, 0);
 }
@@ -206,7 +206,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 
        assert_spin_locked(&dev_priv->irq_lock);
 
-       if (WARN_ON(dev_priv->pm.irqs_disabled))
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;
 
        new_val = dev_priv->pm_irq_mask;
@@ -220,12 +220,12 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
        }
 }
 
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
        snb_update_pm_irq(dev_priv, mask, mask);
 }
 
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
        snb_update_pm_irq(dev_priv, mask, 0);
 }
@@ -264,7 +264,7 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
 
        assert_spin_locked(&dev_priv->irq_lock);
 
-       if (WARN_ON(dev_priv->pm.irqs_disabled))
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;
 
        new_val = dev_priv->pm_irq_mask;
@@ -278,12 +278,12 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
        }
 }
 
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
        bdw_update_pm_irq(dev_priv, mask, mask);
 }
 
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
        bdw_update_pm_irq(dev_priv, mask, 0);
 }
@@ -420,7 +420,7 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 
        assert_spin_locked(&dev_priv->irq_lock);
 
-       if (WARN_ON(dev_priv->pm.irqs_disabled))
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;
 
        I915_WRITE(SDEIMR, sdeimr);
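Note: all of the WARN_ON conversions above are the same substitution. Instead of peeking at dev_priv->pm.irqs_disabled, callers ask a helper. A minimal sketch of that accessor, assuming the pm._irqs_disabled rename that appears at the end of this diff (the real definition lives in a driver header):

	static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
	{
		return !dev_priv->pm._irqs_disabled;
	}

The underscore prefix marks the field as private to the helper, so the WARN_ON sites no longer depend on how the state is stored.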
@@ -1156,10 +1156,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
        bool changed = false;
        u32 hpd_event_bits;
 
-       /* HPD irq before everything is fully set up. */
-       if (!dev_priv->enable_hotplug_processing)
-               return;
-
        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
@@ -1169,6 +1165,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
        dev_priv->hpd_event_bits = 0;
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
+               if (!intel_connector->encoder)
+                       continue;
                intel_encoder = intel_connector->encoder;
                if (intel_encoder->hpd_pin > HPD_NONE &&
                    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
@@ -1199,6 +1197,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
+               if (!intel_connector->encoder)
+                       continue;
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        if (intel_encoder->hot_plug)
@@ -1272,6 +1272,131 @@ static void notify_ring(struct drm_device *dev,
        i915_queue_hangcheck(dev);
 }
 
+static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
+                           struct intel_rps_ei *rps_ei)
+{
+       u32 cz_ts, cz_freq_khz;
+       u32 render_count, media_count;
+       u32 elapsed_render, elapsed_media, elapsed_time;
+       u32 residency = 0;
+
+       cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+       cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
+
+       render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
+       media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
+
+       if (rps_ei->cz_clock == 0) {
+               rps_ei->cz_clock = cz_ts;
+               rps_ei->render_c0 = render_count;
+               rps_ei->media_c0 = media_count;
+
+               return dev_priv->rps.cur_freq;
+       }
+
+       elapsed_time = cz_ts - rps_ei->cz_clock;
+       rps_ei->cz_clock = cz_ts;
+
+       elapsed_render = render_count - rps_ei->render_c0;
+       rps_ei->render_c0 = render_count;
+
+       elapsed_media = media_count - rps_ei->media_c0;
+       rps_ei->media_c0 = media_count;
+
+       /* Convert all the counters to a common unit, milliseconds. */
+       elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
+       elapsed_render /= cz_freq_khz;
+       elapsed_media /= cz_freq_khz;
+
+       /*
+        * Calculate the overall C0 residency percentage only if the
+        * elapsed time is non-zero.
+        */
+       if (elapsed_time)
+               residency = max(elapsed_render, elapsed_media) * 100 /
+                           elapsed_time;
+
+       return residency;
+}
+
+/**
+ * vlv_calc_delay_from_C0_counters - increase/decrease the RPS frequency
+ * @dev_priv: DRM device private
+ *
+ * Derive the proposed new frequency from GPU busyness, calculated from
+ * the C0 residency counters of the render and media power wells.
+ */
+static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+{
+       u32 residency_C0_up = 0, residency_C0_down = 0;
+       int new_delay, adj;
+
+       dev_priv->rps.ei_interrupt_count++;
+
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+       if (dev_priv->rps.up_ei.cz_clock == 0) {
+               vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
+               vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
+               return dev_priv->rps.cur_freq;
+       }
+
+       /*
+        * To throttle down, C0 residency must stay below the down threshold
+        * for consecutive EI intervals, so sample the down counters only
+        * once every VLV_INT_COUNT_FOR_DOWN_EI interrupts.
+        */
+       if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
+               dev_priv->rps.ei_interrupt_count = 0;
+
+               residency_C0_down = vlv_c0_residency(dev_priv,
+                                                    &dev_priv->rps.down_ei);
+       } else {
+               residency_C0_up = vlv_c0_residency(dev_priv,
+                                                  &dev_priv->rps.up_ei);
+       }
+
+       new_delay = dev_priv->rps.cur_freq;
+
+       adj = dev_priv->rps.last_adj;
+       /* C0 residency is greater than UP threshold. Increase Frequency */
+       if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
+               if (adj > 0)
+                       adj *= 2;
+               else
+                       adj = 1;
+
+               if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
+                       new_delay = dev_priv->rps.cur_freq + adj;
+
+               /*
+                * For better performance, jump directly
+                * to RPe if we're below it.
+                */
+               if (new_delay < dev_priv->rps.efficient_freq)
+                       new_delay = dev_priv->rps.efficient_freq;
+
+       } else if (!dev_priv->rps.ei_interrupt_count &&
+                       (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
+               if (adj < 0)
+                       adj *= 2;
+               else
+                       adj = -1;
+               /*
+                * C0 residency stayed below the down threshold for a full
+                * VLV_INT_COUNT_FOR_DOWN_EI period, so reduce the frequency.
+                */
+               if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
+                       new_delay = dev_priv->rps.cur_freq + adj;
+       }
+
+       return new_delay;
+}
+
 static void gen6_pm_rps_work(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
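Note: the residency math above works on free-running u32 counters, so the deltas remain correct across wraparound, and the result is the C0 time of the busier power well as a percentage of the evaluation interval. The core of it as a standalone function (a hypothetical restatement for clarity, not part of the patch):

	/* All inputs are deltas over one EI window, already scaled to ms. */
	static u32 c0_residency_pct(u32 render_ms, u32 media_ms, u32 elapsed_ms)
	{
		u32 busiest = render_ms > media_ms ? render_ms : media_ms;

		if (!elapsed_ms)
			return 0;
		return busiest * 100 / elapsed_ms;
	}

vlv_calc_delay_from_C0_counters() then mirrors the existing gen6 heuristic: residency at or above VLV_RP_UP_EI_THRESHOLD grows a positive adjustment and jumps straight to RPe when below it, while a full VLV_INT_COUNT_FOR_DOWN_EI window below VLV_RP_DOWN_EI_THRESHOLD grows a negative one.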
@@ -1282,11 +1407,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
        spin_lock_irq(&dev_priv->irq_lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
-       if (IS_BROADWELL(dev_priv->dev))
-               bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+       if (INTEL_INFO(dev_priv->dev)->gen >= 8)
+               gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        else {
                /* Make sure not to corrupt PMIMR state used by ringbuffer */
-               snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+               gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        }
        spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -1320,6 +1445,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
                else
                        new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
+       } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+               new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
@@ -1426,7 +1553,7 @@ static void ivybridge_parity_work(struct work_struct *work)
 out:
        WARN_ON(dev_priv->l3_parity.which_slice);
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+       gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1440,7 +1567,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
                return;
 
        spin_lock(&dev_priv->irq_lock);
-       ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+       gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
        spin_unlock(&dev_priv->irq_lock);
 
        iir &= GT_PARITY_ERROR(dev);
@@ -1495,7 +1622,7 @@ static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 
        spin_lock(&dev_priv->irq_lock);
        dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-       bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+       gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
        spin_unlock(&dev_priv->irq_lock);
 
        queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1719,7 +1846,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
         * deadlock.
         */
        if (queue_dig)
-               schedule_work(&dev_priv->dig_port_work);
+               queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
        if (queue_hp)
                schedule_work(&dev_priv->hotplug_work);
 }
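Note: the digital-port work moves off the shared system workqueue onto a driver-private dev_priv->dp_wq, so long-running port handling cannot stall, or deadlock against, other system-queue work. A plausible allocation for that queue in the driver load path, sketched on the assumption that an ordered queue is wanted so the handler never runs concurrently with itself:

	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (!dev_priv->dp_wq)
		return -ENOMEM;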
@@ -1842,7 +1969,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
        if (pm_iir & dev_priv->pm_rps_events) {
                spin_lock(&dev_priv->irq_lock);
                dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-               snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+               gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
                spin_unlock(&dev_priv->irq_lock);
 
                queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1862,14 +1989,9 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 
 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
 {
-       struct intel_crtc *crtc;
-
        if (!drm_handle_vblank(dev, pipe))
                return false;
 
-       crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
-       wake_up(&crtc->vbl_wait);
-
        return true;
 }
 
@@ -2927,12 +3049,7 @@ static bool
 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
 {
        if (INTEL_INFO(dev)->gen >= 8) {
-               /*
-                * FIXME: gen8 semaphore support - currently we don't emit
-                * semaphores on bdw anyway, but this needs to be addressed when
-                * we merge that code.
-                */
-               return false;
+               return (ipehr >> 23) == 0x1c;
        } else {
                ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
                return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
@@ -2941,19 +3058,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
 }
 
 static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
        int i;
 
        if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
-               /*
-                * FIXME: gen8 semaphore support - currently we don't emit
-                * semaphores on bdw anyway, but this needs to be addressed when
-                * we merge that code.
-                */
-               return NULL;
+               for_each_ring(signaller, dev_priv, i) {
+                       if (ring == signaller)
+                               continue;
+
+                       if (offset == signaller->semaphore.signal_ggtt[ring->id])
+                               return signaller;
+               }
        } else {
                u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
@@ -2966,8 +3084,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
                }
        }
 
-       DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
-                 ring->id, ipehr);
+       DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
+                 ring->id, ipehr, offset);
 
        return NULL;
 }
@@ -2977,7 +3095,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        u32 cmd, ipehr, head;
-       int i;
+       u64 offset = 0;
+       int i, backwards;
 
        ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
        if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
@@ -2986,13 +3105,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
        /*
         * HEAD is likely pointing to the dword after the actual command,
         * so scan backwards until we find the MBOX. But limit it to just 3
-        * dwords. Note that we don't care about ACTHD here since that might
+        * or 4 dwords depending on the semaphore wait command size.
+        * Note that we don't care about ACTHD here since that might
        * point at a batch, and semaphores are always emitted into the
         * ringbuffer itself.
         */
        head = I915_READ_HEAD(ring) & HEAD_ADDR;
+       backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
 
-       for (i = 4; i; --i) {
+       for (i = backwards; i; --i) {
                /*
                 * Be paranoid and presume the hw has gone off into the wild -
                 * our ring is smaller than what the hardware (and hence
@@ -3012,14 +3133,19 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
                return NULL;
 
        *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
-       return semaphore_wait_to_signaller_ring(ring, ipehr);
+       if (INTEL_INFO(ring->dev)->gen >= 8) {
+               offset = ioread32(ring->buffer->virtual_start + head + 12);
+               offset <<= 32;
+               offset |= ioread32(ring->buffer->virtual_start + head + 8);
+       }
+       return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
 }
 
 static int semaphore_passed(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
-       u32 seqno, ctl;
+       u32 seqno;
 
        ring->hangcheck.deadlock++;
 
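Note: the gen8 branches above assume the MI_SEMAPHORE_WAIT encoding: (ipehr >> 23) == 0x1c tests the opcode field in bits 31:23 of the instruction header, and the command is one dword longer than the legacy MI_SEMAPHORE_MBOX form, which is why the backwards scan length grows and the readback assembles a 64-bit GGTT offset. The layout being parsed, as read from the code rather than stated in the patch:

	head + 0:  MI_SEMAPHORE_WAIT header (opcode 0x1c in bits 31:23)
	head + 4:  compare value; *seqno becomes this value + 1
	head + 8:  semaphore GGTT address, low dword
	head + 12: semaphore GGTT address, high dword

semaphore_wait_to_signaller_ring() then matches that offset against each other ring's semaphore.signal_ggtt[ring->id] slot to identify the engine expected to signal.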
@@ -3031,15 +3157,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
        if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
                return -1;
 
-       /* cursory check for an unkickable deadlock */
-       ctl = I915_READ_CTL(signaller);
-       if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
-               return -1;
-
        if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
                return 1;
 
-       if (signaller->hangcheck.deadlock)
+       /* cursory check for an unkickable deadlock */
+       if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+           semaphore_passed(signaller) < 0)
                return -1;
 
        return 0;
@@ -3061,8 +3184,14 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;
 
-       if (ring->hangcheck.acthd != acthd)
-               return HANGCHECK_ACTIVE;
+       if (acthd != ring->hangcheck.acthd) {
+               if (acthd > ring->hangcheck.max_acthd) {
+                       ring->hangcheck.max_acthd = acthd;
+                       return HANGCHECK_ACTIVE;
+               }
+
+               return HANGCHECK_ACTIVE_LOOP;
+       }
 
        if (IS_GEN2(dev))
                return HANGCHECK_HUNG;
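Note: the max_acthd high-water mark separates genuine forward progress from a busy loop that keeps revisiting the same ring addresses. The same decision in miniature (a hypothetical restatement, not driver code):

	enum progress { PROG_STUCK, PROG_ADVANCING, PROG_LOOPING };

	static enum progress classify_acthd(u64 acthd, u64 *last, u64 *max)
	{
		if (acthd == *last)
			return PROG_STUCK;	/* run the stuck checks */
		*last = acthd;
		if (acthd > *max) {
			*max = acthd;
			return PROG_ADVANCING;	/* HANGCHECK_ACTIVE */
		}
		return PROG_LOOPING;		/* HANGCHECK_ACTIVE_LOOP */
	}

As the scoring change below shows, only the looping case now accumulates BUSY score, so a shader spinning in place is eventually declared hung while a slow but advancing workload is left alone.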
@@ -3173,8 +3302,9 @@ static void i915_hangcheck_elapsed(unsigned long data)
                                switch (ring->hangcheck.action) {
                                case HANGCHECK_IDLE:
                                case HANGCHECK_WAIT:
-                                       break;
                                case HANGCHECK_ACTIVE:
+                                       break;
+                               case HANGCHECK_ACTIVE_LOOP:
                                        ring->hangcheck.score += BUSY;
                                        break;
                                case HANGCHECK_KICK:
@@ -3194,6 +3324,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
                         */
                        if (ring->hangcheck.score > 0)
                                ring->hangcheck.score--;
+
+                       ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
                }
 
                ring->hangcheck.seqno = seqno;
@@ -3336,7 +3468,9 @@ static void gen8_irq_reset(struct drm_device *dev)
        gen8_gt_irq_reset(dev_priv);
 
        for_each_pipe(pipe)
-               GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+               if (intel_display_power_enabled(dev_priv,
+                                               POWER_DOMAIN_PIPE(pipe)))
+                       GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
 
        GEN5_IRQ_RESET(GEN8_DE_PORT_);
        GEN5_IRQ_RESET(GEN8_DE_MISC_);
@@ -3345,6 +3479,18 @@ static void gen8_irq_reset(struct drm_device *dev)
        ibx_irq_reset(dev);
 }
 
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
+                         ~dev_priv->de_irq_mask[PIPE_B]);
+       GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
+                         ~dev_priv->de_irq_mask[PIPE_C]);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
 static void cherryview_irq_preinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
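Note: gen8_irq_power_well_post_enable() exists because the pipe B and C interrupt registers sit in a power well that can be switched off; when the well comes back up, their IMR/IER state must be rewritten. A sketch of the expected call site in the display power-well enable path (hypothetical shape, the actual caller lives outside this file):

	/* after powering up the well feeding pipes B and C */
	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv);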
@@ -3376,18 +3522,17 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
 static void ibx_hpd_irq_setup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *intel_encoder;
        u32 hotplug_irqs, hotplug, enabled_irqs = 0;
 
        if (HAS_PCH_IBX(dev)) {
                hotplug_irqs = SDE_HOTPLUG_MASK;
-               list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+               for_each_intel_encoder(dev, intel_encoder)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
                                enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
        } else {
                hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
-               list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+               for_each_intel_encoder(dev, intel_encoder)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
                                enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
        }
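Note: these conversions, repeated in i915_hpd_irq_setup further down, replace open-coded encoder-list walks with for_each_intel_encoder(). A sketch of the macro the call sites assume (its real home is a driver header):

	#define for_each_intel_encoder(dev, intel_encoder) \
		list_for_each_entry(intel_encoder, \
				    &(dev)->mode_config.encoder_list, \
				    base.head)

Folding the list-head lookup into the macro is also what lets both hpd_irq_setup functions drop their local mode_config pointer.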
@@ -3669,8 +3814,11 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
        dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
 
        for_each_pipe(pipe)
-               GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
-                                 de_pipe_enables);
+               if (intel_display_power_enabled(dev_priv,
+                               POWER_DOMAIN_PIPE(pipe)))
+                       GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+                                         dev_priv->de_irq_mask[pipe],
+                                         de_pipe_enables);
 
        GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
 }
@@ -4303,7 +4451,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
 static void i915_hpd_irq_setup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *intel_encoder;
        u32 hotplug_en;
 
@@ -4314,7 +4461,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
                hotplug_en &= ~HOTPLUG_INT_EN_MASK;
                /* Note HDMI and DP share hotplug bits */
                /* enable bits are the same for all generations */
-               list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+               for_each_intel_encoder(dev, intel_encoder)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
                                hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
                /* Programming the CRT detection parameters tends
@@ -4507,7 +4654,11 @@ void intel_irq_init(struct drm_device *dev)
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
        /* Let's track the enabled rps events */
-       dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+       if (IS_VALLEYVIEW(dev))
+               /* WaGsvRC0ResidenncyMethod:VLV */
+               dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+       else
+               dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
        setup_timer(&dev_priv->gpu_error.hangcheck_timer,
                    i915_hangcheck_elapsed,
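Note: on ValleyView the driver now arms only the EI-expired interrupt; the up and down decisions come from the C0 residency counters added earlier in this patch rather than from the hardware's own threshold events. For contrast, the default mask expands to roughly the following (a sketch of the register-header definition, assumed rather than quoted from this patch):

	#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
				    GEN6_PM_RP_DOWN_THRESHOLD | \
				    GEN6_PM_RP_DOWN_TIMEOUT)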
@@ -4517,6 +4668,9 @@ void intel_irq_init(struct drm_device *dev)
 
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
+       /* Haven't installed the IRQ handler yet */
+       dev_priv->pm._irqs_disabled = true;
+
        if (IS_GEN2(dev)) {
                dev->max_vblank_count = 0;
                dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
@@ -4604,7 +4758,9 @@ void intel_hpd_init(struct drm_device *dev)
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                struct intel_connector *intel_connector = to_intel_connector(connector);
                connector->polled = intel_connector->polled;
-               if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+               if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+                       connector->polled = DRM_CONNECTOR_POLL_HPD;
+               if (intel_connector->mst_port)
                        connector->polled = DRM_CONNECTOR_POLL_HPD;
        }
 
@@ -4622,7 +4778,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        dev->driver->irq_uninstall(dev);
-       dev_priv->pm.irqs_disabled = true;
+       dev_priv->pm._irqs_disabled = true;
 }
 
 /* Restore interrupts so we can recover from runtime PM. */
@@ -4630,7 +4786,7 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       dev_priv->pm.irqs_disabled = false;
+       dev_priv->pm._irqs_disabled = false;
        dev->driver->irq_preinstall(dev);
        dev->driver->irq_postinstall(dev);
 }