Merge branch 'topic/skl-stage1' into drm-intel-next-queued
authorDaniel Vetter <daniel.vetter@ffwll.ch>
Tue, 30 Sep 2014 20:36:57 +0000 (22:36 +0200)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Tue, 30 Sep 2014 20:36:57 +0000 (22:36 +0200)
SKL stage 1 patches still need polish so will likely miss the 3.18
merge window. We've decided to postpone to 3.19 so let's pull this in
to make patch merging and conflict handling easier.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
19 files changed:
1  2 
Documentation/DocBook/drm.tmpl
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uncore.c

index 7ad61284ad5fd7b08036181f111482391369e874,be35bc328b775948cd10d1c2cf3f8df76256aeb2..98528b49da6542d969894edafcb0d3f126f97693
@@@ -3406,7 -3406,7 +3406,7 @@@ void (*disable_vblank) (struct drm_devi
      <sect2>
        <title>Vertical Blanking and Interrupt Handling Functions Reference</title>
  !Edrivers/gpu/drm/drm_irq.c
- !Iinclude/drm/drmP.h drm_crtc_vblank_waitqueue
+ !Finclude/drm/drmP.h drm_crtc_vblank_waitqueue
      </sect2>
    </sect1>
  
@@@ -3803,13 -3803,6 +3803,13 @@@ int num_ioctls;</synopsis
            configuration change.
          </para>
        </sect2>
 +      <sect2>
 +        <title>Frontbuffer Tracking</title>
 +!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
 +!Idrivers/gpu/drm/i915/intel_frontbuffer.c
 +!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
 +!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
 +      </sect2>
        <sect2>
          <title>Plane Configuration</title>
          <para>
index 0ba5c7145240c85e2a770ad683e8cfdba0607c3d,707eddd1825f89d177f1cf06ca711e48e05d7d98..14c88c22281c5ce828e53db42522c6509ad1273b
@@@ -516,6 -516,7 +516,6 @@@ static int i915_gem_pageflip_info(struc
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      unsigned long flags;
        struct intel_crtc *crtc;
        int ret;
  
                const char plane = plane_name(crtc->plane);
                struct intel_unpin_work *work;
  
 -              spin_lock_irqsave(&dev->event_lock, flags);
 +              spin_lock_irq(&dev->event_lock);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
                        }
                }
 -              spin_unlock_irqrestore(&dev->event_lock, flags);
 +              spin_unlock_irq(&dev->event_lock);
        }
  
        mutex_unlock(&dev->struct_mutex);
@@@ -1985,7 -1986,7 +1985,7 @@@ static int i915_swizzle_info(struct seq
                           I915_READ(MAD_DIMM_C2));
                seq_printf(m, "TILECTL = 0x%08x\n",
                           I915_READ(TILECTL));
-               if (IS_GEN8(dev))
+               if (INTEL_INFO(dev)->gen >= 8)
                        seq_printf(m, "GAMTARBMODE = 0x%08x\n",
                                   I915_READ(GAMTARBMODE));
                else
@@@ -3825,6 -3826,7 +3825,6 @@@ i915_drop_caches_set(void *data, u64 va
  {
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_i915_gem_object *obj, *next;
        int ret;
  
        DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
        if (val & (DROP_RETIRE | DROP_ACTIVE))
                i915_gem_retire_requests(dev);
  
 -      if (val & DROP_BOUND) {
 -              list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
 -                                       global_list) {
 -                      struct i915_vma *vma, *v;
 +      if (val & DROP_BOUND)
 +              i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
  
 -                      ret = 0;
 -                      drm_gem_object_reference(&obj->base);
 -                      list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) {
 -                              if (vma->pin_count)
 -                                      continue;
 -
 -                              ret = i915_vma_unbind(vma);
 -                              if (ret)
 -                                      break;
 -                      }
 -                      drm_gem_object_unreference(&obj->base);
 -                      if (ret)
 -                              goto unlock;
 -              }
 -      }
 -
 -      if (val & DROP_UNBOUND) {
 -              list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
 -                                       global_list)
 -                      if (obj->pages_pin_count == 0) {
 -                              ret = i915_gem_object_put_pages(obj);
 -                              if (ret)
 -                                      goto unlock;
 -                      }
 -      }
 +      if (val & DROP_UNBOUND)
 +              i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
  
  unlock:
        mutex_unlock(&dev->struct_mutex);
index 0bc1583114e72be9f5d46c8f6fd0c266f9635465,f64050d0c3454a698afbdc7c38d9617448d9e5c7..1c035c49577ec997b3c69825f59b98569edd4d03
@@@ -1534,7 -1534,7 +1534,7 @@@ static void intel_device_info_runtime_i
  
        info = (struct intel_device_info *)&dev_priv->info;
  
-       if (IS_VALLEYVIEW(dev))
+       if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 2;
        else
@@@ -1614,7 -1614,7 +1614,7 @@@ int i915_driver_load(struct drm_device 
  
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
 -      spin_lock_init(&dev_priv->backlight_lock);
 +      mutex_init(&dev_priv->backlight_lock);
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
        spin_lock_init(&dev_priv->mmio_flip_lock);
index 8ce1b13ad97e535983a8fb7e3e4ed1ebc07a628b,51fbb3459e50d101e77813982f5942e881a62db3..6948877c881ca54381ad24107acf3d55b9f6ad36
@@@ -356,6 -356,19 +356,19 @@@ static const struct intel_device_info i
        CURSOR_OFFSETS,
  };
  
+ static const struct intel_device_info intel_skylake_info = {
+       .is_preliminary = 1,
+       .is_skylake = 1,
+       .gen = 9, .num_pipes = 3,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+       .has_llc = 1,
+       .has_ddi = 1,
+       .has_fbc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       IVB_CURSOR_OFFSETS,
+ };
  /*
   * Make sure any device matches here are from most specific to most
   * general.  For example, since the Quanta match is based on the subsystem
        INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),   \
        INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
        INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
-       INTEL_CHV_IDS(&intel_cherryview_info)
+       INTEL_CHV_IDS(&intel_cherryview_info),  \
+       INTEL_SKL_IDS(&intel_skylake_info)
  
  static const struct pci_device_id pciidlist[] = {             /* aka */
        INTEL_PCI_IDS,
@@@ -461,6 -475,16 +475,16 @@@ void intel_detect_pch(struct drm_devic
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(!IS_ULT(dev));
+                       } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_SPT;
+                               DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+                               WARN_ON(!IS_SKYLAKE(dev));
+                               WARN_ON(IS_ULT(dev));
+                       } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_SPT;
+                               DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+                               WARN_ON(!IS_SKYLAKE(dev));
+                               WARN_ON(!IS_ULT(dev));
                        } else
                                continue;
  
@@@ -680,16 -704,16 +704,16 @@@ static int __i915_drm_thaw(struct drm_d
                }
                mutex_unlock(&dev->struct_mutex);
  
 +              /* We need working interrupts for modeset enabling ... */
                intel_runtime_pm_restore_interrupts(dev);
  
                intel_modeset_init_hw(dev);
  
                {
 -                      unsigned long irqflags;
 -                      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +                      spin_lock_irq(&dev_priv->irq_lock);
                        if (dev_priv->display.hpd_irq_setup)
                                dev_priv->display.hpd_irq_setup(dev);
 -                      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +                      spin_unlock_irq(&dev_priv->irq_lock);
                }
  
                intel_dp_mst_resume(dev);
@@@ -871,6 -895,8 +895,6 @@@ int i915_reset(struct drm_device *dev
                 */
                if (INTEL_INFO(dev)->gen > 5)
                        intel_reset_gt_powersave(dev);
 -
 -              intel_hpd_init(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
        }
index 8f05258ff49b3a0efb3feb3471def50629804f39,c3dbaaed2ff965f785c6989eadbcdf9d4bc1ccc5..4cd2aa347f37f21138289dc2f740eaa38c137e70
@@@ -43,6 -43,7 +43,7 @@@
  #include <linux/i2c-algo-bit.h>
  #include <drm/intel-gtt.h>
  #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
+ #include <drm/drm_gem.h>
  #include <linux/backlight.h>
  #include <linux/hashtable.h>
  #include <linux/intel-iommu.h>
@@@ -54,7 -55,7 +55,7 @@@
  
  #define DRIVER_NAME           "i915"
  #define DRIVER_DESC           "Intel Graphics"
 -#define DRIVER_DATE           "20140905"
 +#define DRIVER_DATE           "20140919"
  
  enum pipe {
        INVALID_PIPE = -1,
@@@ -75,6 -76,14 +76,14 @@@ enum transcoder 
  };
  #define transcoder_name(t) ((t) + 'A')
  
+ /*
+  * This is the maximum (across all platforms) number of planes (primary +
+  * sprites) that can be active at the same time on one pipe.
+  *
+  * This value doesn't count the cursor plane.
+  */
+ #define I915_MAX_PLANES       3
  enum plane {
        PLANE_A = 0,
        PLANE_B,
@@@ -550,6 -559,7 +559,7 @@@ struct intel_uncore 
        func(is_ivybridge) sep \
        func(is_valleyview) sep \
        func(is_haswell) sep \
+       func(is_skylake) sep \
        func(is_preliminary) sep \
        func(has_fbc) sep \
        func(has_pipe_cxsr) sep \
@@@ -662,18 -672,6 +672,18 @@@ struct i915_fbc 
  
        bool false_color;
  
 +      /* Tracks whether the HW is actually enabled, not whether the feature is
 +       * possible. */
 +      bool enabled;
 +
 +      /* On gen8 some rings cannot perform fbc clean operation so for now
 +       * we are doing this on SW with mmio.
 +       * This variable works in the opposite information direction
 +       * of ring->fbc_dirty telling software on frontbuffer tracking
 +       * to perform the cache clean on sw side.
 +       */
 +      bool need_sw_cache_clean;
 +
        struct intel_fbc_work {
                struct delayed_work work;
                struct drm_crtc *crtc;
@@@ -715,6 -713,7 +725,7 @@@ enum intel_pch 
        PCH_IBX,        /* Ibexpeak PCH */
        PCH_CPT,        /* Cougarpoint PCH */
        PCH_LPT,        /* Lynxpoint PCH */
+       PCH_SPT,        /* Sunrisepoint PCH */
        PCH_NOP,
  };
  
@@@ -1540,7 -1539,7 +1551,7 @@@ struct drm_i915_private 
        struct intel_overlay *overlay;
  
        /* backlight registers and fields in struct intel_panel */
 -      spinlock_t backlight_lock;
 +      struct mutex backlight_lock;
  
        /* LVDS info */
        bool no_aux_handshake;
@@@ -2104,6 -2103,7 +2115,7 @@@ struct drm_i915_cmd_table 
  #define IS_CHERRYVIEW(dev)    (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
  #define IS_HASWELL(dev)       (INTEL_INFO(dev)->is_haswell)
  #define IS_BROADWELL(dev)     (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
+ #define IS_SKYLAKE(dev)       (INTEL_INFO(dev)->is_skylake)
  #define IS_MOBILE(dev)                (INTEL_INFO(dev)->is_mobile)
  #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
                                 ((INTEL_DEVID(dev) & 0xf) == 0x2  || \
                                 (INTEL_DEVID(dev) & 0xf) == 0x6 || \
                                 (INTEL_DEVID(dev) & 0xf) == 0xe))
 +#define IS_BDW_GT3(dev)               (IS_BROADWELL(dev) && \
 +                               (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
  #define IS_HSW_ULT(dev)               (IS_HASWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
  #define IS_ULT(dev)           (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
  #define IS_GEN6(dev)  (INTEL_INFO(dev)->gen == 6)
  #define IS_GEN7(dev)  (INTEL_INFO(dev)->gen == 7)
  #define IS_GEN8(dev)  (INTEL_INFO(dev)->gen == 8)
+ #define IS_GEN9(dev)  (INTEL_INFO(dev)->gen == 9)
  
  #define RENDER_RING           (1<<RCS)
  #define BSD_RING              (1<<VCS)
  
  #define HAS_HW_CONTEXTS(dev)  (INTEL_INFO(dev)->gen >= 6)
  #define HAS_LOGICAL_RING_CONTEXTS(dev)        (INTEL_INFO(dev)->gen >= 8)
 -#define HAS_ALIASING_PPGTT(dev)       (INTEL_INFO(dev)->gen >= 6)
 -#define HAS_PPGTT(dev)                (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
  #define USES_PPGTT(dev)               (i915.enable_ppgtt)
  #define USES_FULL_PPGTT(dev)  (i915.enable_ppgtt == 2)
  
  #define INTEL_PCH_PPT_DEVICE_ID_TYPE          0x1e00
  #define INTEL_PCH_LPT_DEVICE_ID_TYPE          0x8c00
  #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE               0x9c00
+ #define INTEL_PCH_SPT_DEVICE_ID_TYPE          0xA100
+ #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE               0x9D00
  
  #define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
+ #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
  #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
  #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
  #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@@ -2380,12 -2384,6 +2396,12 @@@ int i915_gem_get_aperture_ioctl(struct 
  int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
  void i915_gem_load(struct drm_device *dev);
 +unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
 +                            long target,
 +                            unsigned flags);
 +#define I915_SHRINK_PURGEABLE 0x1
 +#define I915_SHRINK_UNBOUND 0x2
 +#define I915_SHRINK_BOUND 0x4
  void *i915_gem_object_alloc(struct drm_device *dev);
  void i915_gem_object_free(struct drm_i915_gem_object *obj);
  void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@@ -2835,7 -2833,7 +2851,7 @@@ extern void intel_modeset_setup_hw_stat
  extern void i915_redisable_vga(struct drm_device *dev);
  extern void i915_redisable_vga_power_on(struct drm_device *dev);
  extern bool intel_fbc_enabled(struct drm_device *dev);
 -extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
 +extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
  extern void intel_disable_fbc(struct drm_device *dev);
  extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
  extern void intel_init_pch_refclk(struct drm_device *dev);
index 55a2ebb510bf8c9c759429cea879dd9bf2ff287c,2a38ef27bfcda2f9ac829deb5b2af2b07cc838ed..e05e0063a3b1db2d3ce5db947e25a129d0b97a7f
@@@ -60,6 -60,7 +60,6 @@@ static unsigned long i915_gem_shrinker_
  static int i915_gem_shrinker_oom(struct notifier_block *nb,
                                 unsigned long event,
                                 void *ptr);
 -static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
  static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
  
  static bool cpu_cache_is_coherent(struct drm_device *dev,
@@@ -1740,11 -1741,7 +1740,11 @@@ static int i915_gem_object_create_mmap_
         * offsets on purgeable objects by truncating it and marking it purged,
         * which prevents userspace from ever using that object again.
         */
 -      i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
 +      i915_gem_shrink(dev_priv,
 +                      obj->base.size >> PAGE_SHIFT,
 +                      I915_SHRINK_BOUND |
 +                      I915_SHRINK_UNBOUND |
 +                      I915_SHRINK_PURGEABLE);
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
                goto out;
@@@ -1941,11 -1938,12 +1941,11 @@@ i915_gem_object_put_pages(struct drm_i9
        return 0;
  }
  
 -static unsigned long
 -__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 -                bool purgeable_only)
 +unsigned long
 +i915_gem_shrink(struct drm_i915_private *dev_priv,
 +              long target, unsigned flags)
  {
 -      struct list_head still_in_list;
 -      struct drm_i915_gem_object *obj;
 +      const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
        unsigned long count = 0;
  
        /*
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count equals 0.
         */
 -      INIT_LIST_HEAD(&still_in_list);
 -      while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
 -              obj = list_first_entry(&dev_priv->mm.unbound_list,
 -                                     typeof(*obj), global_list);
 -              list_move_tail(&obj->global_list, &still_in_list);
 +      if (flags & I915_SHRINK_UNBOUND) {
 +              struct list_head still_in_list;
  
 -              if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
 -                      continue;
 +              INIT_LIST_HEAD(&still_in_list);
 +              while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
 +                      struct drm_i915_gem_object *obj;
  
 -              drm_gem_object_reference(&obj->base);
 +                      obj = list_first_entry(&dev_priv->mm.unbound_list,
 +                                             typeof(*obj), global_list);
 +                      list_move_tail(&obj->global_list, &still_in_list);
  
 -              if (i915_gem_object_put_pages(obj) == 0)
 -                      count += obj->base.size >> PAGE_SHIFT;
 +                      if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
 +                              continue;
 +
 +                      drm_gem_object_reference(&obj->base);
  
 -              drm_gem_object_unreference(&obj->base);
 +                      if (i915_gem_object_put_pages(obj) == 0)
 +                              count += obj->base.size >> PAGE_SHIFT;
 +
 +                      drm_gem_object_unreference(&obj->base);
 +              }
 +              list_splice(&still_in_list, &dev_priv->mm.unbound_list);
        }
 -      list_splice(&still_in_list, &dev_priv->mm.unbound_list);
  
 -      INIT_LIST_HEAD(&still_in_list);
 -      while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
 -              struct i915_vma *vma, *v;
 +      if (flags & I915_SHRINK_BOUND) {
 +              struct list_head still_in_list;
  
 -              obj = list_first_entry(&dev_priv->mm.bound_list,
 -                                     typeof(*obj), global_list);
 -              list_move_tail(&obj->global_list, &still_in_list);
 +              INIT_LIST_HEAD(&still_in_list);
 +              while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
 +                      struct drm_i915_gem_object *obj;
 +                      struct i915_vma *vma, *v;
  
 -              if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
 -                      continue;
 +                      obj = list_first_entry(&dev_priv->mm.bound_list,
 +                                             typeof(*obj), global_list);
 +                      list_move_tail(&obj->global_list, &still_in_list);
  
 -              drm_gem_object_reference(&obj->base);
 +                      if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
 +                              continue;
  
 -              list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
 -                      if (i915_vma_unbind(vma))
 -                              break;
 +                      drm_gem_object_reference(&obj->base);
  
 -              if (i915_gem_object_put_pages(obj) == 0)
 -                      count += obj->base.size >> PAGE_SHIFT;
 +                      list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
 +                              if (i915_vma_unbind(vma))
 +                                      break;
 +
 +                      if (i915_gem_object_put_pages(obj) == 0)
 +                              count += obj->base.size >> PAGE_SHIFT;
  
 -              drm_gem_object_unreference(&obj->base);
 +                      drm_gem_object_unreference(&obj->base);
 +              }
 +              list_splice(&still_in_list, &dev_priv->mm.bound_list);
        }
 -      list_splice(&still_in_list, &dev_priv->mm.bound_list);
  
        return count;
  }
  
 -static unsigned long
 -i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 -{
 -      return __i915_gem_shrink(dev_priv, target, true);
 -}
 -
  static unsigned long
  i915_gem_shrink_all(struct drm_i915_private *dev_priv)
  {
        i915_gem_evict_everything(dev_priv->dev);
 -      return __i915_gem_shrink(dev_priv, LONG_MAX, false);
 +      return i915_gem_shrink(dev_priv, LONG_MAX,
 +                             I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
  }
  
  static int
@@@ -2075,11 -2067,7 +2075,11 @@@ i915_gem_object_get_pages_gtt(struct dr
        for (i = 0; i < page_count; i++) {
                page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                if (IS_ERR(page)) {
 -                      i915_gem_purge(dev_priv, page_count);
 +                      i915_gem_shrink(dev_priv,
 +                                      page_count,
 +                                      I915_SHRINK_BOUND |
 +                                      I915_SHRINK_UNBOUND |
 +                                      I915_SHRINK_PURGEABLE);
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                }
                if (IS_ERR(page)) {
@@@ -2956,9 -2944,6 +2956,9 @@@ int i915_vma_unbind(struct i915_vma *vm
         * cause memory corruption through use-after-free.
         */
  
 +      /* Throw away the active reference before moving to the unbound list */
 +      i915_gem_object_retire(obj);
 +
        if (i915_is_ggtt(vma->vm)) {
                i915_gem_object_finish_gtt(obj);
  
@@@ -3166,6 -3151,7 +3166,7 @@@ static void i915_gem_write_fence(struc
             obj->stride, obj->tiling_mode);
  
        switch (INTEL_INFO(dev)->gen) {
+       case 9:
        case 8:
        case 7:
        case 6:
@@@ -3351,20 -3337,17 +3352,20 @@@ i915_gem_object_get_fence(struct drm_i9
        return 0;
  }
  
 -static bool i915_gem_valid_gtt_space(struct drm_device *dev,
 -                                   struct drm_mm_node *gtt_space,
 +static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
                                     unsigned long cache_level)
  {
 +      struct drm_mm_node *gtt_space = &vma->node;
        struct drm_mm_node *other;
  
 -      /* On non-LLC machines we have to be careful when putting differing
 -       * types of snoopable memory together to avoid the prefetcher
 -       * crossing memory domains and dying.
 +      /*
 +       * On some machines we have to be careful when putting differing types
 +       * of snoopable memory together to avoid the prefetcher crossing memory
 +       * domains and dying. During vm initialisation, we decide whether or not
 +       * these constraints apply and set the drm_mm.color_adjust
 +       * appropriately.
         */
 -      if (HAS_LLC(dev))
 +      if (vma->vm->mm.color_adjust == NULL)
                return true;
  
        if (!drm_mm_node_allocated(gtt_space))
        return true;
  }
  
 -static void i915_gem_verify_gtt(struct drm_device *dev)
 -{
 -#if WATCH_GTT
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_i915_gem_object *obj;
 -      int err = 0;
 -
 -      list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
 -              if (obj->gtt_space == NULL) {
 -                      printk(KERN_ERR "object found on GTT list with no space reserved\n");
 -                      err++;
 -                      continue;
 -              }
 -
 -              if (obj->cache_level != obj->gtt_space->color) {
 -                      printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
 -                             i915_gem_obj_ggtt_offset(obj),
 -                             i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
 -                             obj->cache_level,
 -                             obj->gtt_space->color);
 -                      err++;
 -                      continue;
 -              }
 -
 -              if (!i915_gem_valid_gtt_space(dev,
 -                                            obj->gtt_space,
 -                                            obj->cache_level)) {
 -                      printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
 -                             i915_gem_obj_ggtt_offset(obj),
 -                             i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
 -                             obj->cache_level);
 -                      err++;
 -                      continue;
 -              }
 -      }
 -
 -      WARN_ON(err);
 -#endif
 -}
 -
  /**
   * Finds free space in the GTT aperture and binds the object there.
   */
@@@ -3462,7 -3485,8 +3463,7 @@@ search_free
  
                goto err_free_vma;
        }
 -      if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
 -                                            obj->cache_level))) {
 +      if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
                ret = -EINVAL;
                goto err_remove_node;
        }
        vma->bind_vma(vma, obj->cache_level,
                      flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
  
 -      i915_gem_verify_gtt(dev);
        return vma;
  
  err_remove_node:
@@@ -3671,7 -3696,7 +3672,7 @@@ int i915_gem_object_set_cache_level(str
        }
  
        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 -              if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
 +              if (!i915_gem_valid_gtt_space(vma, cache_level)) {
                        ret = i915_vma_unbind(vma);
                        if (ret)
                                return ret;
                                                    old_write_domain);
        }
  
 -      i915_gem_verify_gtt(dev);
        return 0;
  }
  
@@@ -5077,15 -5103,6 +5078,15 @@@ int i915_gem_open(struct drm_device *de
        return ret;
  }
  
 +/**
 + * i915_gem_track_fb - update frontbuffer tracking
 + * old: current GEM buffer for the frontbuffer slots
 + * new: new GEM buffer for the frontbuffer slots
 + * frontbuffer_bits: bitmask of frontbuffer slots
 + *
 + * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 + * from @old and setting them in @new. Both @old and @new can be NULL.
 + */
  void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits)
@@@ -5245,16 -5262,11 +5246,16 @@@ i915_gem_shrinker_scan(struct shrinker 
        if (!i915_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;
  
 -      freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
 +      freed = i915_gem_shrink(dev_priv,
 +                              sc->nr_to_scan,
 +                              I915_SHRINK_BOUND |
 +                              I915_SHRINK_UNBOUND |
 +                              I915_SHRINK_PURGEABLE);
        if (freed < sc->nr_to_scan)
 -              freed += __i915_gem_shrink(dev_priv,
 -                                         sc->nr_to_scan - freed,
 -                                         false);
 +              freed += i915_gem_shrink(dev_priv,
 +                                       sc->nr_to_scan - freed,
 +                                       I915_SHRINK_BOUND |
 +                                       I915_SHRINK_UNBOUND);
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
  
index 90c9bf6e71b7f9ea40161b7d2e74793d6da67bef,66e4b2ba345a6388d4c33f0784b6d40e54cb74af..273dad964e1b3c195ad09230790c79af84fcd212
@@@ -35,21 -35,13 +35,21 @@@ static void chv_setup_private_ppat(stru
  
  static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
  {
 -      if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
 +      bool has_aliasing_ppgtt;
 +      bool has_full_ppgtt;
 +
 +      has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
 +      has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
 +      if (IS_GEN8(dev))
 +              has_full_ppgtt = false; /* XXX why? */
 +
 +      if (enable_ppgtt == 0 || !has_aliasing_ppgtt)
                return 0;
  
        if (enable_ppgtt == 1)
                return 1;
  
 -      if (enable_ppgtt == 2 && HAS_PPGTT(dev))
 +      if (enable_ppgtt == 2 && has_full_ppgtt)
                return 2;
  
  #ifdef CONFIG_INTEL_IOMMU
@@@ -67,7 -59,7 +67,7 @@@
                return 0;
        }
  
 -      return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
 +      return has_full_ppgtt ? 2 : has_aliasing_ppgtt ? 1 : 0;
  }
  
  
@@@ -1100,7 -1092,7 +1100,7 @@@ static int __hw_ppgtt_init(struct drm_d
  
        if (INTEL_INFO(dev)->gen < 8)
                return gen6_ppgtt_init(ppgtt);
-       else if (IS_GEN8(dev))
+       else if (IS_GEN8(dev) || IS_GEN9(dev))
                return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
        else
                BUG();
@@@ -1760,6 -1752,7 +1760,6 @@@ static int setup_scratch_page(struct dr
        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;
 -      get_page(page);
        set_pages_uc(page, 1);
  
  #ifdef CONFIG_INTEL_IOMMU
@@@ -1784,6 -1777,7 +1784,6 @@@ static void teardown_scratch_page(struc
        set_pages_wb(page, 1);
        pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 -      put_page(page);
        __free_page(page);
  }
  
@@@ -1853,6 -1847,18 +1853,18 @@@ static size_t chv_get_stolen_size(u16 g
                return (gmch_ctrl - 0x17 + 9) << 22;
  }
  
+ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
+ {
+       gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+       gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
+       if (gen9_gmch_ctl < 0xf0)
+               return gen9_gmch_ctl << 25; /* 32 MB units */
+       else
+               /* 4MB increments starting at 0xf0 for 4MB */
+               return (gen9_gmch_ctl - 0xf0 + 1) << 22;
+ }
  static int ggtt_probe_common(struct drm_device *dev,
                             size_t gtt_size)
  {
@@@ -1949,7 -1955,10 +1961,10 @@@ static int gen8_gmch_probe(struct drm_d
  
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
  
-       if (IS_CHERRYVIEW(dev)) {
+       if (INTEL_INFO(dev)->gen >= 9) {
+               *stolen = gen9_get_stolen_size(snb_gmch_ctl);
+               gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+       } else if (IS_CHERRYVIEW(dev)) {
                *stolen = chv_get_stolen_size(snb_gmch_ctl);
                gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
        } else {
@@@ -2121,6 -2130,7 +2136,7 @@@ static struct i915_vma *__i915_gem_vma_
        vma->obj = obj;
  
        switch (INTEL_INFO(vm->dev)->gen) {
+       case 9:
        case 8:
        case 7:
        case 6:
index 386e45dbeff1f6fcb89680726ddc6693bf03234b,fe1498099f16f0b8aa840909767958044acc446f..e664599de6e70e1b6abe9107f5c9611c1526c30d
@@@ -765,6 -765,7 +765,7 @@@ static void i915_gem_record_fences(stru
  
        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
+       case 9:
        case 8:
        case 7:
        case 6:
@@@ -923,6 -924,7 +924,7 @@@ static void i915_record_ring_state(stru
                ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
  
                switch (INTEL_INFO(dev)->gen) {
+               case 9:
                case 8:
                        for (i = 0; i < 4; i++) {
                                ering->vm_info.pdp[i] =
@@@ -1326,12 -1328,13 +1328,12 @@@ void i915_error_state_get(struct drm_de
                          struct i915_error_state_file_priv *error_priv)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      unsigned long flags;
  
 -      spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
 +      spin_lock_irq(&dev_priv->gpu_error.lock);
        error_priv->error = dev_priv->gpu_error.first_error;
        if (error_priv->error)
                kref_get(&error_priv->error->ref);
 -      spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 +      spin_unlock_irq(&dev_priv->gpu_error.lock);
  
  }
  
@@@ -1345,11 -1348,12 +1347,11 @@@ void i915_destroy_error_state(struct dr
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
 -      unsigned long flags;
  
 -      spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
 +      spin_lock_irq(&dev_priv->gpu_error.lock);
        error = dev_priv->gpu_error.first_error;
        dev_priv->gpu_error.first_error = NULL;
 -      spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 +      spin_unlock_irq(&dev_priv->gpu_error.lock);
  
        if (error)
                kref_put(&error->ref, i915_error_state_free);
@@@ -1387,6 -1391,7 +1389,7 @@@ void i915_get_extra_instdone(struct drm
                WARN_ONCE(1, "Unsupported platform\n");
        case 7:
        case 8:
+       case 9:
                instdone[0] = I915_READ(GEN7_INSTDONE_1);
                instdone[1] = I915_READ(GEN7_SC_INSTDONE);
                instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
index a08cdc62f8413927672582ab710eed1178e66bf1,ba5ba63288dbd4dbbc80278b1392b34637af1507..080981b56a4eee88095bc657e7cbb1ab9c705a66
@@@ -310,8 -310,9 +310,8 @@@ void i9xx_check_fifo_underruns(struct d
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
 -      unsigned long flags;
  
 -      spin_lock_irqsave(&dev_priv->irq_lock, flags);
 +      spin_lock_irq(&dev_priv->irq_lock);
  
        for_each_intel_crtc(dev, crtc) {
                u32 reg = PIPESTAT(crtc->pipe);
                DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
        }
  
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  }
  
  static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
@@@ -502,7 -503,7 +502,7 @@@ static bool __intel_set_cpu_fifo_underr
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
-       else if (IS_GEN8(dev))
+       else if (IS_GEN8(dev) || IS_GEN9(dev))
                broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
  
        return old;
@@@ -588,7 -589,6 +588,7 @@@ __i915_enable_pipestat(struct drm_i915_
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
  
        assert_spin_locked(&dev_priv->irq_lock);
 +      WARN_ON(!intel_irqs_enabled(dev_priv));
  
        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@@ -615,7 -615,6 +615,7 @@@ __i915_disable_pipestat(struct drm_i915
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
  
        assert_spin_locked(&dev_priv->irq_lock);
 +      WARN_ON(!intel_irqs_enabled(dev_priv));
  
        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@@ -695,18 -694,19 +695,18 @@@ i915_disable_pipestat(struct drm_i915_p
  static void i915_enable_asle_pipestat(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      unsigned long irqflags;
  
        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;
  
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      spin_lock_irq(&dev_priv->irq_lock);
  
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);
  
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  }
  
  /**
@@@ -1094,17 -1094,18 +1094,17 @@@ static void i915_digport_work_func(stru
  {
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, dig_port_work);
 -      unsigned long irqflags;
        u32 long_port_mask, short_port_mask;
        struct intel_digital_port *intel_dig_port;
        int i, ret;
        u32 old_bits = 0;
  
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      spin_lock_irq(&dev_priv->irq_lock);
        long_port_mask = dev_priv->long_hpd_port_mask;
        dev_priv->long_hpd_port_mask = 0;
        short_port_mask = dev_priv->short_hpd_port_mask;
        dev_priv->short_hpd_port_mask = 0;
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  
        for (i = 0; i < I915_MAX_PORTS; i++) {
                bool valid = false;
        }
  
        if (old_bits) {
 -              spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +              spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hpd_event_bits |= old_bits;
 -              spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +              spin_unlock_irq(&dev_priv->irq_lock);
                schedule_work(&dev_priv->hotplug_work);
        }
  }
@@@ -1150,6 -1151,7 +1150,6 @@@ static void i915_hotplug_work_func(stru
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
 -      unsigned long irqflags;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;
        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");
  
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      spin_lock_irq(&dev_priv->irq_lock);
  
        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
                                 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }
  
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
@@@ -1486,6 -1488,7 +1486,6 @@@ static void ivybridge_parity_work(struc
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
 -      unsigned long flags;
        uint8_t slice = 0;
  
        /* We must turn off DOP level clock gating to access the L3 registers.
  
  out:
        WARN_ON(dev_priv->l3_parity.which_slice);
 -      spin_lock_irqsave(&dev_priv->irq_lock, flags);
 +      spin_lock_irq(&dev_priv->irq_lock);
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  
        mutex_unlock(&dev_priv->dev->struct_mutex);
  }
@@@ -2584,7 -2587,7 +2584,7 @@@ static irqreturn_t gen8_irq_handler(in
        }
  
        for_each_pipe(dev_priv, pipe) {
-               uint32_t pipe_iir;
+               uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
  
                if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
                        continue;
                if (pipe_iir) {
                        ret = IRQ_HANDLED;
                        I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
                        if (pipe_iir & GEN8_PIPE_VBLANK &&
                            intel_pipe_handle_vblank(dev, pipe))
                                intel_check_page_flip(dev, pipe);
  
-                       if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+                       if (IS_GEN9(dev))
+                               flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
+                       else
+                               flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
+                       if (flip_done) {
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip_plane(dev, pipe);
                        }
                                                  pipe_name(pipe));
                        }
  
-                       if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+                       if (IS_GEN9(dev))
+                               fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+                       else
+                               fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+                       if (fault_errors)
                                DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
                                          pipe_name(pipe),
                                          pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
-                       }
                } else
                        DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
        }
@@@ -3475,12 -3489,14 +3486,12 @@@ static void gen8_irq_reset(struct drm_d
  
  void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
  {
 -      unsigned long irqflags;
 -
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      spin_lock_irq(&dev_priv->irq_lock);
        GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
                          ~dev_priv->de_irq_mask[PIPE_B]);
        GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
                          ~dev_priv->de_irq_mask[PIPE_C]);
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  }
  
  static void cherryview_irq_preinstall(struct drm_device *dev)
@@@ -3599,6 -3615,7 +3610,6 @@@ static void gen5_gt_irq_postinstall(str
  
  static int ironlake_irq_postinstall(struct drm_device *dev)
  {
 -      unsigned long irqflags;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 display_mask, extra_mask;
  
                 * spinlocking not required here for correctness since interrupt
                 * setup is guaranteed to run in single-threaded context. But we
                 * need it to make the assert_spin_locked happy. */
 -              spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +              spin_lock_irq(&dev_priv->irq_lock);
                ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
 -              spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +              spin_unlock_irq(&dev_priv->irq_lock);
        }
  
        return 0;
@@@ -3715,7 -3732,7 +3726,7 @@@ void valleyview_enable_display_irqs(str
  
        dev_priv->display_irqs_enabled = true;
  
 -      if (dev_priv->dev->irq_enabled)
 +      if (intel_irqs_enabled(dev_priv))
                valleyview_display_irqs_install(dev_priv);
  }
  
@@@ -3728,13 -3745,14 +3739,13 @@@ void valleyview_disable_display_irqs(st
  
        dev_priv->display_irqs_enabled = false;
  
 -      if (dev_priv->dev->irq_enabled)
 +      if (intel_irqs_enabled(dev_priv))
                valleyview_display_irqs_uninstall(dev_priv);
  }
  
  static int valleyview_irq_postinstall(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      unsigned long irqflags;
  
        dev_priv->irq_mask = ~0;
  
  
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display_irqs_enabled)
                valleyview_display_irqs_install(dev_priv);
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
@@@ -3796,12 -3814,20 +3807,20 @@@ static void gen8_gt_irq_postinstall(str
  
  static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
  {
-       uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
-               GEN8_PIPE_CDCLK_CRC_DONE |
-               GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
-       uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
-               GEN8_PIPE_FIFO_UNDERRUN;
+       uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+       uint32_t de_pipe_enables;
        int pipe;
+       if (IS_GEN9(dev_priv))
+               de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
+                                 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+       else
+               de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
+                                 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+       de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+                                          GEN8_PIPE_FIFO_UNDERRUN;
        dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
        dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
        dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
@@@ -3842,6 -3868,7 +3861,6 @@@ static int cherryview_irq_postinstall(s
                I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
                PIPE_CRC_DONE_INTERRUPT_STATUS;
 -      unsigned long irqflags;
        int pipe;
  
        /*
        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
  
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        for_each_pipe(dev_priv, pipe)
                i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
@@@ -3884,6 -3911,7 +3903,6 @@@ static void gen8_irq_uninstall(struct d
  static void valleyview_irq_uninstall(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      unsigned long irqflags;
        int pipe;
  
        if (!dev_priv)
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      /* Interrupt setup is already guaranteed to be single-threaded, this is
 +       * just to make the assert_spin_locked check happy. */
 +      spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display_irqs_enabled)
                valleyview_display_irqs_uninstall(dev_priv);
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  
        dev_priv->irq_mask = 0;
  
@@@ -3989,6 -4015,7 +4008,6 @@@ static void i8xx_irq_preinstall(struct 
  static int i8xx_irq_postinstall(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      unsigned long irqflags;
  
        I915_WRITE16(EMR,
                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  
        return 0;
  }
@@@ -4059,6 -4086,7 +4078,6 @@@ static irqreturn_t i8xx_irq_handler(in
        struct drm_i915_private *dev_priv = dev->dev_private;
        u16 iir, new_iir;
        u32 pipe_stats[2];
 -      unsigned long irqflags;
        int pipe;
        u16 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
 -              spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +              spin_lock(&dev_priv->irq_lock);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false,
                                          "Command parser error, iir 0x%08x",
                        if (pipe_stats[pipe] & 0x8000ffff)
                                I915_WRITE(reg, pipe_stats[pipe]);
                }
 -              spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +              spin_unlock(&dev_priv->irq_lock);
  
                I915_WRITE16(IIR, iir & ~flip_mask);
                new_iir = I915_READ16(IIR); /* Flush posted writes */
@@@ -4160,6 -4188,7 +4179,6 @@@ static int i915_irq_postinstall(struct 
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 enable_mask;
 -      unsigned long irqflags;
  
        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
  
  
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  
        return 0;
  }
@@@ -4244,6 -4273,7 +4263,6 @@@ static irqreturn_t i915_irq_handler(in
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
 -      unsigned long irqflags;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
 -              spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +              spin_lock(&dev_priv->irq_lock);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false,
                                          "Command parser error, iir 0x%08x",
                                irq_received = true;
                        }
                }
 -              spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +              spin_unlock(&dev_priv->irq_lock);
  
                if (!irq_received)
                        break;
@@@ -4381,6 -4411,7 +4400,6 @@@ static int i965_irq_postinstall(struct 
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 enable_mask;
        u32 error_mask;
 -      unsigned long irqflags;
  
        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
  
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  
        /*
         * Enable some error detection, note the instruction error mask
@@@ -4470,6 -4501,7 +4489,6 @@@ static irqreturn_t i965_irq_handler(in
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
 -      unsigned long irqflags;
        int ret = IRQ_NONE, pipe;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
 -              spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +              spin_lock(&dev_priv->irq_lock);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false,
                                          "Command parser error, iir 0x%08x",
                                irq_received = true;
                        }
                }
 -              spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +              spin_unlock(&dev_priv->irq_lock);
  
                if (!irq_received)
                        break;
@@@ -4591,18 -4623,19 +4610,18 @@@ static void i965_irq_uninstall(struct d
        I915_WRITE(IIR, I915_READ(IIR));
  }
  
 -static void intel_hpd_irq_reenable(struct work_struct *work)
 +static void intel_hpd_irq_reenable_work(struct work_struct *work)
  {
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             hotplug_reenable_work.work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
 -      unsigned long irqflags;
        int i;
  
        intel_runtime_pm_get(dev_priv);
  
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      spin_lock_irq(&dev_priv->irq_lock);
        for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
                struct drm_connector *connector;
  
        }
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  
        intel_runtime_pm_put(dev_priv);
  }
@@@ -4652,7 -4685,7 +4671,7 @@@ void intel_irq_init(struct drm_device *
                    i915_hangcheck_elapsed,
                    (unsigned long) dev);
        INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
 -                        intel_hpd_irq_reenable);
 +                        intel_hpd_irq_reenable_work);
  
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
  
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-       } else if (IS_GEN8(dev)) {
+       } else if (INTEL_INFO(dev)->gen >= 8) {
                dev->driver->irq_handler = gen8_irq_handler;
                dev->driver->irq_preinstall = gen8_irq_reset;
                dev->driver->irq_postinstall = gen8_irq_postinstall;
@@@ -4744,6 -4777,7 +4763,6 @@@ void intel_hpd_init(struct drm_device *
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
 -      unsigned long irqflags;
        int i;
  
        for (i = 1; i < HPD_NUM_PINS; i++) {
  
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked checks happy. */
 -      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
 -      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      spin_unlock_irq(&dev_priv->irq_lock);
  }
  
  /* Disable interrupts so we can allow runtime PM. */
index 124ea60c138663a2ca2394f98ba4326959a66409,bc14aaa6f091c367e1f468aecc827f6284045c0b..c62f3eb3911d4b8d154c2165abc99ce5d27554ee
@@@ -26,8 -26,8 +26,8 @@@
  #define _I915_REG_H_
  
  #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+ #define _PLANE(plane, a, b) _PIPE(plane, a, b)
  #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
  #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
  #define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
                               (pipe) == PIPE_B ? (b) : (c))
  #define GAB_CTL                               0x24000
  #define   GAB_CTL_CONT_AFTER_PAGEFAULT        (1<<8)
  
 +#define GEN7_BIOS_RESERVED            0x1082C0
 +#define GEN7_BIOS_RESERVED_1M         (0 << 5)
 +#define GEN7_BIOS_RESERVED_256K               (1 << 5)
 +#define GEN8_BIOS_RESERVED_SHIFT       7
 +#define GEN7_BIOS_RESERVED_MASK        0x1
 +#define GEN8_BIOS_RESERVED_MASK        0x3
 +
 +
  /* VGA stuff */
  
  #define VGA_ST01_MDA 0x3ba
@@@ -3642,6 -3634,7 +3642,7 @@@ enum punit_power_well 
  #define   DP_AUX_CH_CTL_PRECHARGE_TEST            (1 << 11)
  #define   DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK    (0x7ff)
  #define   DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT   0
+ #define   DP_AUX_CH_CTL_SYNC_PULSE_SKL(c)   ((c) - 1)
  
  /*
   * Computing GMCH M and N values for the Display Port link
  #define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
  #define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
  
+ /* Skylake plane registers */
+ #define _PLANE_CTL_1_A                                0x70180
+ #define _PLANE_CTL_2_A                                0x70280
+ #define _PLANE_CTL_3_A                                0x70380
+ #define   PLANE_CTL_ENABLE                    (1 << 31)
+ #define   PLANE_CTL_PIPE_GAMMA_ENABLE         (1 << 30)
+ #define   PLANE_CTL_FORMAT_MASK                       (0xf << 24)
+ #define   PLANE_CTL_FORMAT_YUV422             (  0 << 24)
+ #define   PLANE_CTL_FORMAT_NV12                       (  1 << 24)
+ #define   PLANE_CTL_FORMAT_XRGB_2101010               (  2 << 24)
+ #define   PLANE_CTL_FORMAT_XRGB_8888          (  4 << 24)
+ #define   PLANE_CTL_FORMAT_XRGB_16161616F     (  6 << 24)
+ #define   PLANE_CTL_FORMAT_AYUV                       (  8 << 24)
+ #define   PLANE_CTL_FORMAT_INDEXED            ( 12 << 24)
+ #define   PLANE_CTL_FORMAT_RGB_565            ( 14 << 24)
+ #define   PLANE_CTL_PIPE_CSC_ENABLE           (1 << 23)
+ #define   PLANE_CTL_KEY_ENABLE_MASK           (0x3 << 21)
+ #define   PLANE_CTL_KEY_ENABLE_SOURCE         (  1 << 21)
+ #define   PLANE_CTL_KEY_ENABLE_DESTINATION    (  2 << 21)
+ #define   PLANE_CTL_ORDER_BGRX                        (0 << 20)
+ #define   PLANE_CTL_ORDER_RGBX                        (1 << 20)
+ #define   PLANE_CTL_YUV422_ORDER_MASK         (0x3 << 16)
+ #define   PLANE_CTL_YUV422_YUYV                       (  0 << 16)
+ #define   PLANE_CTL_YUV422_UYVY                       (  1 << 16)
+ #define   PLANE_CTL_YUV422_YVYU                       (  2 << 16)
+ #define   PLANE_CTL_YUV422_VYUY                       (  3 << 16)
+ #define   PLANE_CTL_DECOMPRESSION_ENABLE      (1 << 15)
+ #define   PLANE_CTL_TRICKLE_FEED_DISABLE      (1 << 14)
+ #define   PLANE_CTL_PLANE_GAMMA_DISABLE               (1 << 13)
+ #define   PLANE_CTL_TILED_MASK                        (0x7 << 10)
+ #define   PLANE_CTL_TILED_LINEAR              (  0 << 10)
+ #define   PLANE_CTL_TILED_X                   (  1 << 10)
+ #define   PLANE_CTL_TILED_Y                   (  4 << 10)
+ #define   PLANE_CTL_TILED_YF                  (  5 << 10)
+ #define   PLANE_CTL_ALPHA_MASK                        (0x3 << 4)
+ #define   PLANE_CTL_ALPHA_DISABLE             (  0 << 4)
+ #define   PLANE_CTL_ALPHA_SW_PREMULTIPLY      (  2 << 4)
+ #define   PLANE_CTL_ALPHA_HW_PREMULTIPLY      (  3 << 4)
+ #define _PLANE_STRIDE_1_A                     0x70188
+ #define _PLANE_STRIDE_2_A                     0x70288
+ #define _PLANE_STRIDE_3_A                     0x70388
+ #define _PLANE_POS_1_A                                0x7018c
+ #define _PLANE_POS_2_A                                0x7028c
+ #define _PLANE_POS_3_A                                0x7038c
+ #define _PLANE_SIZE_1_A                               0x70190
+ #define _PLANE_SIZE_2_A                               0x70290
+ #define _PLANE_SIZE_3_A                               0x70390
+ #define _PLANE_SURF_1_A                               0x7019c
+ #define _PLANE_SURF_2_A                               0x7029c
+ #define _PLANE_SURF_3_A                               0x7039c
+ #define _PLANE_OFFSET_1_A                     0x701a4
+ #define _PLANE_OFFSET_2_A                     0x702a4
+ #define _PLANE_OFFSET_3_A                     0x703a4
+ #define _PLANE_KEYVAL_1_A                     0x70194
+ #define _PLANE_KEYVAL_2_A                     0x70294
+ #define _PLANE_KEYMSK_1_A                     0x70198
+ #define _PLANE_KEYMSK_2_A                     0x70298
+ #define _PLANE_KEYMAX_1_A                     0x701a0
+ #define _PLANE_KEYMAX_2_A                     0x702a0
+ #define _PLANE_CTL_1_B                                0x71180
+ #define _PLANE_CTL_2_B                                0x71280
+ #define _PLANE_CTL_3_B                                0x71380
+ #define _PLANE_CTL_1(pipe)    _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
+ #define _PLANE_CTL_2(pipe)    _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
+ #define _PLANE_CTL_3(pipe)    _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
+ #define PLANE_CTL(pipe, plane)        \
+       _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
+ #define _PLANE_STRIDE_1_B                     0x71188
+ #define _PLANE_STRIDE_2_B                     0x71288
+ #define _PLANE_STRIDE_3_B                     0x71388
+ #define _PLANE_STRIDE_1(pipe) \
+       _PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
+ #define _PLANE_STRIDE_2(pipe) \
+       _PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
+ #define _PLANE_STRIDE_3(pipe) \
+       _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
+ #define PLANE_STRIDE(pipe, plane)     \
+       _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
+ #define _PLANE_POS_1_B                                0x7118c
+ #define _PLANE_POS_2_B                                0x7128c
+ #define _PLANE_POS_3_B                                0x7138c
+ #define _PLANE_POS_1(pipe)    _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
+ #define _PLANE_POS_2(pipe)    _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
+ #define _PLANE_POS_3(pipe)    _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
+ #define PLANE_POS(pipe, plane)        \
+       _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
+ #define _PLANE_SIZE_1_B                               0x71190
+ #define _PLANE_SIZE_2_B                               0x71290
+ #define _PLANE_SIZE_3_B                               0x71390
+ #define _PLANE_SIZE_1(pipe)   _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
+ #define _PLANE_SIZE_2(pipe)   _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
+ #define _PLANE_SIZE_3(pipe)   _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
+ #define PLANE_SIZE(pipe, plane)       \
+       _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
+ #define _PLANE_SURF_1_B                               0x7119c
+ #define _PLANE_SURF_2_B                               0x7129c
+ #define _PLANE_SURF_3_B                               0x7139c
+ #define _PLANE_SURF_1(pipe)   _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
+ #define _PLANE_SURF_2(pipe)   _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
+ #define _PLANE_SURF_3(pipe)   _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
+ #define PLANE_SURF(pipe, plane)       \
+       _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
+ #define _PLANE_OFFSET_1_B                     0x711a4
+ #define _PLANE_OFFSET_2_B                     0x712a4
+ #define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
+ #define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
+ #define PLANE_OFFSET(pipe, plane)     \
+       _PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
+ #define _PLANE_KEYVAL_1_B                     0x71194
+ #define _PLANE_KEYVAL_2_B                     0x71294
+ #define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
+ #define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
+ #define PLANE_KEYVAL(pipe, plane)     \
+       _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
+ #define _PLANE_KEYMSK_1_B                     0x71198
+ #define _PLANE_KEYMSK_2_B                     0x71298
+ #define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
+ #define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
+ #define PLANE_KEYMSK(pipe, plane)     \
+       _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
+ #define _PLANE_KEYMAX_1_B                     0x711a0
+ #define _PLANE_KEYMAX_2_B                     0x712a0
+ #define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
+ #define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
+ #define PLANE_KEYMAX(pipe, plane)     \
+       _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
  /* VBIOS regs */
  #define VGACNTRL              0x71400
  # define VGA_DISP_DISABLE                     (1 << 31)
  #define  GEN8_PIPE_SCAN_LINE_EVENT    (1 << 2)
  #define  GEN8_PIPE_VSYNC              (1 << 1)
  #define  GEN8_PIPE_VBLANK             (1 << 0)
+ #define  GEN9_PIPE_CURSOR_FAULT               (1 << 11)
+ #define  GEN9_PIPE_PLANE3_FAULT               (1 << 9)
+ #define  GEN9_PIPE_PLANE2_FAULT               (1 << 8)
+ #define  GEN9_PIPE_PLANE1_FAULT               (1 << 7)
+ #define  GEN9_PIPE_PLANE3_FLIP_DONE   (1 << 5)
+ #define  GEN9_PIPE_PLANE2_FLIP_DONE   (1 << 4)
+ #define  GEN9_PIPE_PLANE1_FLIP_DONE   (1 << 3)
+ #define  GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p))
  #define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
        (GEN8_PIPE_CURSOR_FAULT | \
         GEN8_PIPE_SPRITE_FAULT | \
         GEN8_PIPE_PRIMARY_FAULT)
+ #define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
+       (GEN9_PIPE_CURSOR_FAULT | \
+        GEN9_PIPE_PLANE3_FAULT | \
+        GEN9_PIPE_PLANE2_FAULT | \
+        GEN9_PIPE_PLANE1_FAULT)
  
  #define GEN8_DE_PORT_ISR 0x44440
  #define GEN8_DE_PORT_IMR 0x44444
  /* GEN8 chicken */
  #define HDC_CHICKEN0                          0x7300
  #define  HDC_FORCE_NON_COHERENT                       (1<<4)
 +#define  HDC_FENCE_DEST_SLM_DISABLE           (1<<14)
  
  /* WaCatErrorRejectionIssue */
  #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG                0x9030
  #define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
  #define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE        (1<<3)
  
+ #define GEN9_HALF_SLICE_CHICKEN5      0xe188
+ #define   GEN9_DG_MIRROR_FIX_ENABLE   (1<<5)
  #define GEN8_ROW_CHICKEN              0xe4f0
  #define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE       (1<<8)
  #define   STALL_DOP_GATING_DISABLE            (1<<5)
index 29582781143311b92d47dd4147b2805b54917f75,ba1103f23e2f93473775a3616afc6fdfcebea500..c9f4b3c43614f121e79ad20e7a3b0711745b9035
@@@ -95,8 -95,8 +95,8 @@@ static const struct ddi_buf_trans bdw_d
        { 0x00BEFFFF, 0x00140006 },
        { 0x80B2CFFF, 0x001B0002 },
        { 0x00FFFFFF, 0x000E000A },
 -      { 0x00D75FFF, 0x00180004 },
 -      { 0x80CB2FFF, 0x001B0002 },
 +      { 0x00DB6FFF, 0x00160005 },
 +      { 0x80C71FFF, 0x001A0002 },
        { 0x00F7DFFF, 0x00180004 },
        { 0x80D75FFF, 0x001B0002 },
  };
@@@ -127,6 -127,32 +127,32 @@@ static const struct ddi_buf_trans bdw_d
        { 0x80FFFFFF, 0x001B0002 },     /* 9:   1000    1000    0       */
  };
  
+ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
+       { 0x00000018, 0x000000a0 },
+       { 0x00004014, 0x00000098 },
+       { 0x00006012, 0x00000088 },
+       { 0x00008010, 0x00000080 },
+       { 0x00000018, 0x00000098 },
+       { 0x00004014, 0x00000088 },
+       { 0x00006012, 0x00000080 },
+       { 0x00000018, 0x00000088 },
+       { 0x00004014, 0x00000080 },
+ };
+ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
+                                       /* Idx  NT mV   T mV    db  */
+       { 0x00000018, 0x000000a0 },     /* 0:   400     400     0   */
+       { 0x00004014, 0x00000098 },     /* 1:   400     600     3.5 */
+       { 0x00006012, 0x00000088 },     /* 2:   400     800     6   */
+       { 0x00000018, 0x0000003c },     /* 3:   450     450     0   */
+       { 0x00000018, 0x00000098 },     /* 4:   600     600     0   */
+       { 0x00003015, 0x00000088 },     /* 5:   600     800     2.5 */
+       { 0x00005013, 0x00000080 },     /* 6:   600     1000    4.5 */
+       { 0x00000018, 0x00000088 },     /* 7:   800     800     0   */
+       { 0x00000096, 0x00000080 },     /* 8:   800     1000    2   */
+       { 0x00000018, 0x00000080 },     /* 9:   1200    1200    0   */
+ };
  enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
  {
        struct drm_encoder *encoder = &intel_encoder->base;
@@@ -169,7 -195,14 +195,14 @@@ static void intel_prepare_ddi_buffers(s
        const struct ddi_buf_trans *ddi_translations_hdmi;
        const struct ddi_buf_trans *ddi_translations;
  
-       if (IS_BROADWELL(dev)) {
+       if (IS_SKYLAKE(dev)) {
+               ddi_translations_fdi = NULL;
+               ddi_translations_dp = skl_ddi_translations_dp;
+               ddi_translations_edp = skl_ddi_translations_dp;
+               ddi_translations_hdmi = skl_ddi_translations_hdmi;
+               n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+               hdmi_800mV_0dB = 7;
+       } else if (IS_BROADWELL(dev)) {
                ddi_translations_fdi = bdw_ddi_translations_fdi;
                ddi_translations_dp = bdw_ddi_translations_dp;
                ddi_translations_edp = bdw_ddi_translations_edp;
                        ddi_translations = ddi_translations_dp;
                break;
        case PORT_E:
-               ddi_translations = ddi_translations_fdi;
+               if (ddi_translations_fdi)
+                       ddi_translations = ddi_translations_fdi;
+               else
+                       ddi_translations = ddi_translations_dp;
                break;
        default:
                BUG();
index 858011d22482f991642f827420e0435ed720c653,1bda97c7100ac263eae4769ed41398923fb98b6d..b8488a8c1e9f6c977ad69d5b8413fd21e24aff91
@@@ -76,6 -76,8 +76,6 @@@ static const uint32_t intel_cursor_form
  #define DIV_ROUND_CLOSEST_ULL(ll, d)  \
  ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
  
 -static void intel_increase_pllclock(struct drm_device *dev,
 -                                  enum pipe pipe);
  static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
  
  static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@@ -890,6 -892,60 +890,6 @@@ enum transcoder intel_pipe_to_cpu_trans
        return intel_crtc->config.cpu_transcoder;
  }
  
 -static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
 -
 -      frame = I915_READ(frame_reg);
 -
 -      if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
 -              WARN(1, "vblank wait on pipe %c timed out\n",
 -                   pipe_name(pipe));
 -}
 -
 -/**
 - * intel_wait_for_vblank - wait for vblank on a given pipe
 - * @dev: drm device
 - * @pipe: pipe to wait for
 - *
 - * Wait for vblank to occur on a given pipe.  Needed for various bits of
 - * mode setting code.
 - */
 -void intel_wait_for_vblank(struct drm_device *dev, int pipe)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      int pipestat_reg = PIPESTAT(pipe);
 -
 -      if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 -              g4x_wait_for_vblank(dev, pipe);
 -              return;
 -      }
 -
 -      /* Clear existing vblank status. Note this will clear any other
 -       * sticky status fields as well.
 -       *
 -       * This races with i915_driver_irq_handler() with the result
 -       * that either function could miss a vblank event.  Here it is not
 -       * fatal, as we will either wait upon the next vblank interrupt or
 -       * timeout.  Generally speaking intel_wait_for_vblank() is only
 -       * called during modeset at which time the GPU should be idle and
 -       * should *not* be performing page flips and thus not waiting on
 -       * vblanks...
 -       * Currently, the result of us stealing a vblank from the irq
 -       * handler is that a single frame will be skipped during swapbuffers.
 -       */
 -      I915_WRITE(pipestat_reg,
 -                 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
 -
 -      /* Wait for vblank interrupt bit to set */
 -      if (wait_for(I915_READ(pipestat_reg) &
 -                   PIPE_VBLANK_INTERRUPT_STATUS,
 -                   50))
 -              DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
 -                            pipe_name(pipe));
 -}
 -
  static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -1136,8 -1192,8 +1136,8 @@@ void assert_fdi_rx_pll(struct drm_i915_
             state_string(state), state_string(cur_state));
  }
  
 -static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
 -                                enum pipe pipe)
 +void assert_panel_unlocked(struct drm_i915_private *dev_priv,
 +                         enum pipe pipe)
  {
        struct drm_device *dev = dev_priv->dev;
        int pp_reg;
@@@ -1279,7 -1335,14 +1279,14 @@@ static void assert_sprites_disabled(str
        int reg, sprite;
        u32 val;
  
-       if (IS_VALLEYVIEW(dev)) {
+       if (INTEL_INFO(dev)->gen >= 9) {
+               for_each_sprite(pipe, sprite) {
+                       val = I915_READ(PLANE_CTL(pipe, sprite));
+                       WARN(val & PLANE_CTL_ENABLE,
+                            "plane %d assertion failure, should be off on pipe %c but is still active\n",
+                            sprite, pipe_name(pipe));
+               }
+       } else if (IS_VALLEYVIEW(dev)) {
                for_each_sprite(pipe, sprite) {
                        reg = SPCNTR(pipe, sprite);
                        val = I915_READ(reg);
@@@ -1556,18 -1619,6 +1563,18 @@@ static void chv_enable_pll(struct intel
        mutex_unlock(&dev_priv->dpio_lock);
  }
  
 +static int intel_num_dvo_pipes(struct drm_device *dev)
 +{
 +      struct intel_crtc *crtc;
 +      int count = 0;
 +
 +      for_each_intel_crtc(dev, crtc)
 +              count += crtc->active &&
 +                      intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
 +
 +      return count;
 +}
 +
  static void i9xx_enable_pll(struct intel_crtc *crtc)
  {
        struct drm_device *dev = crtc->base.dev;
        if (IS_MOBILE(dev) && !IS_I830(dev))
                assert_panel_unlocked(dev_priv, crtc->pipe);
  
 -      I915_WRITE(reg, dpll);
 +      /* Enable DVO 2x clock on both PLLs if necessary */
 +      if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
 +              /*
 +               * It appears to be important that we don't enable this
 +               * for the current pipe before otherwise configuring the
 +               * PLL. No idea how this should be handled if multiple
 +               * DVO outputs are enabled simultaneosly.
 +               */
 +              dpll |= DPLL_DVO_2X_MODE;
 +              I915_WRITE(DPLL(!crtc->pipe),
 +                         I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
 +      }
  
        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
   *
   * Note!  This is for pre-ILK only.
   */
 -static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 +static void i9xx_disable_pll(struct intel_crtc *crtc)
  {
 +      struct drm_device *dev = crtc->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      enum pipe pipe = crtc->pipe;
 +
 +      /* Disable DVO 2x clock on both PLLs if necessary */
 +      if (IS_I830(dev) &&
 +          intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
 +          intel_num_dvo_pipes(dev) == 1) {
 +              I915_WRITE(DPLL(PIPE_B),
 +                         I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
 +              I915_WRITE(DPLL(PIPE_A),
 +                         I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
 +      }
 +
        /* Don't disable pipe or pipe PLLs if needed */
        if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
@@@ -2180,7 -2206,9 +2187,9 @@@ intel_pin_and_fence_fb_obj(struct drm_d
  
        switch (obj->tiling_mode) {
        case I915_TILING_NONE:
-               if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+               if (INTEL_INFO(dev)->gen >= 9)
+                       alignment = 256 * 1024;
+               else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                        alignment = 128 * 1024;
                else if (INTEL_INFO(dev)->gen >= 4)
                        alignment = 4 * 1024;
                        alignment = 64 * 1024;
                break;
        case I915_TILING_X:
-               /* pin() will align the object as required by fence */
-               alignment = 0;
+               if (INTEL_INFO(dev)->gen >= 9)
+                       alignment = 256 * 1024;
+               else {
+                       /* pin() will align the object as required by fence */
+                       alignment = 0;
+               }
                break;
        case I915_TILING_Y:
                WARN(1, "Y tiled bo slipped through, driver bug!\n");
@@@ -2619,6 -2651,90 +2632,90 @@@ static void ironlake_update_primary_pla
        POSTING_READ(reg);
  }
  
+ static void skylake_update_primary_plane(struct drm_crtc *crtc,
+                                        struct drm_framebuffer *fb,
+                                        int x, int y)
+ {
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_framebuffer *intel_fb;
+       struct drm_i915_gem_object *obj;
+       int pipe = intel_crtc->pipe;
+       u32 plane_ctl, stride;
+       if (!intel_crtc->primary_enabled) {
+               I915_WRITE(PLANE_CTL(pipe, 0), 0);
+               I915_WRITE(PLANE_SURF(pipe, 0), 0);
+               POSTING_READ(PLANE_CTL(pipe, 0));
+               return;
+       }
+       plane_ctl = PLANE_CTL_ENABLE |
+                   PLANE_CTL_PIPE_GAMMA_ENABLE |
+                   PLANE_CTL_PIPE_CSC_ENABLE;
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_RGB565:
+               plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               break;
+       case DRM_FORMAT_XBGR8888:
+               plane_ctl |= PLANE_CTL_ORDER_RGBX;
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+               break;
+       case DRM_FORMAT_XBGR2101010:
+               plane_ctl |= PLANE_CTL_ORDER_RGBX;
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+               break;
+       default:
+               BUG();
+       }
+       intel_fb = to_intel_framebuffer(fb);
+       obj = intel_fb->obj;
+       /*
+        * The stride is either expressed as a multiple of 64 bytes chunks for
+        * linear buffers or in number of tiles for tiled buffers.
+        */
+       switch (obj->tiling_mode) {
+       case I915_TILING_NONE:
+               stride = fb->pitches[0] >> 6;
+               break;
+       case I915_TILING_X:
+               plane_ctl |= PLANE_CTL_TILED_X;
+               stride = fb->pitches[0] >> 9;
+               break;
+       default:
+               BUG();
+       }
+       plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+       I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
+       DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
+                     i915_gem_obj_ggtt_offset(obj),
+                     x, y, fb->width, fb->height,
+                     fb->pitches[0]);
+       I915_WRITE(PLANE_POS(pipe, 0), 0);
+       I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
+       I915_WRITE(PLANE_SIZE(pipe, 0),
+                  (intel_crtc->config.pipe_src_h - 1) << 16 |
+                  (intel_crtc->config.pipe_src_w - 1));
+       I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+       I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+       POSTING_READ(PLANE_SURF(pipe, 0));
+ }
  /* Assume fb object is pinned & idle & fenced and just update base pointers */
  static int
  intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
  
        if (dev_priv->display.disable_fbc)
                dev_priv->display.disable_fbc(dev);
 -      intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
  
        dev_priv->display.update_primary_plane(crtc, fb, x, y);
  
@@@ -2708,58 -2825,20 +2805,58 @@@ static bool intel_crtc_has_pending_flip
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      unsigned long flags;
        bool pending;
  
        if (i915_reset_in_progress(&dev_priv->gpu_error) ||
            intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
                return false;
  
 -      spin_lock_irqsave(&dev->event_lock, flags);
 +      spin_lock_irq(&dev->event_lock);
        pending = to_intel_crtc(crtc)->unpin_work != NULL;
 -      spin_unlock_irqrestore(&dev->event_lock, flags);
 +      spin_unlock_irq(&dev->event_lock);
  
        return pending;
  }
  
 +static void intel_update_pipe_size(struct intel_crtc *crtc)
 +{
 +      struct drm_device *dev = crtc->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      const struct drm_display_mode *adjusted_mode;
 +
 +      if (!i915.fastboot)
 +              return;
 +
 +      /*
 +       * Update pipe size and adjust fitter if needed: the reason for this is
 +       * that in compute_mode_changes we check the native mode (not the pfit
 +       * mode) to see if we can flip rather than do a full mode set. In the
 +       * fastboot case, we'll flip, but if we don't update the pipesrc and
 +       * pfit state, we'll end up with a big fb scanned out into the wrong
 +       * sized surface.
 +       *
 +       * To fix this properly, we need to hoist the checks up into
 +       * compute_mode_changes (or above), check the actual pfit state and
 +       * whether the platform allows pfit disable with pipe active, and only
 +       * then update the pipesrc and pfit state, even on the flip path.
 +       */
 +
 +      adjusted_mode = &crtc->config.adjusted_mode;
 +
 +      I915_WRITE(PIPESRC(crtc->pipe),
 +                 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
 +                 (adjusted_mode->crtc_vdisplay - 1));
 +      if (!crtc->config.pch_pfit.enabled &&
 +          (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) ||
 +           intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))) {
 +              I915_WRITE(PF_CTL(crtc->pipe), 0);
 +              I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
 +              I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
 +      }
 +      crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
 +      crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
 +}
 +
  static int
  intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                    struct drm_framebuffer *fb)
                return ret;
        }
  
 -      /*
 -       * Update pipe size and adjust fitter if needed: the reason for this is
 -       * that in compute_mode_changes we check the native mode (not the pfit
 -       * mode) to see if we can flip rather than do a full mode set. In the
 -       * fastboot case, we'll flip, but if we don't update the pipesrc and
 -       * pfit state, we'll end up with a big fb scanned out into the wrong
 -       * sized surface.
 -       *
 -       * To fix this properly, we need to hoist the checks up into
 -       * compute_mode_changes (or above), check the actual pfit state and
 -       * whether the platform allows pfit disable with pipe active, and only
 -       * then update the pipesrc and pfit state, even on the flip path.
 -       */
 -      if (i915.fastboot) {
 -              const struct drm_display_mode *adjusted_mode =
 -                      &intel_crtc->config.adjusted_mode;
 -
 -              I915_WRITE(PIPESRC(intel_crtc->pipe),
 -                         ((adjusted_mode->crtc_hdisplay - 1) << 16) |
 -                         (adjusted_mode->crtc_vdisplay - 1));
 -              if (!intel_crtc->config.pch_pfit.enabled &&
 -                  (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
 -                   intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
 -                      I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
 -                      I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
 -                      I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
 -              }
 -              intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
 -              intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
 -      }
 +      intel_update_pipe_size(intel_crtc);
  
        dev_priv->display.update_primary_plane(crtc, fb, x, y);
  
@@@ -3427,13 -3535,14 +3524,13 @@@ void intel_crtc_wait_for_pending_flips(
                                       !intel_crtc_has_pending_flip(crtc),
                                       60*HZ) == 0)) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -              unsigned long flags;
  
 -              spin_lock_irqsave(&dev->event_lock, flags);
 +              spin_lock_irq(&dev->event_lock);
                if (intel_crtc->unpin_work) {
                        WARN_ONCE(1, "Removing stuck page flip\n");
                        page_flip_completed(intel_crtc);
                }
 -              spin_unlock_irqrestore(&dev->event_lock, flags);
 +              spin_unlock_irq(&dev->event_lock);
        }
  
        if (crtc->primary->fb) {
@@@ -3992,6 -4101,10 +4089,6 @@@ static void intel_crtc_enable_planes(st
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
  
 -      assert_vblank_disabled(crtc);
 -
 -      drm_vblank_on(dev, pipe);
 -
        intel_enable_primary_hw_plane(crtc->primary, crtc);
        intel_enable_planes(crtc);
        intel_crtc_update_cursor(crtc, true);
@@@ -4037,6 -4150,10 +4134,6 @@@ static void intel_crtc_disable_planes(s
         * consider this a flip to a NULL plane.
         */
        intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
 -
 -      drm_vblank_off(dev, pipe);
 -
 -      assert_vblank_disabled(crtc);
  }
  
  static void ironlake_crtc_enable(struct drm_crtc *crtc)
        if (HAS_PCH_CPT(dev))
                cpt_verify_modeset(dev, intel_crtc->pipe);
  
 +      assert_vblank_disabled(crtc);
 +      drm_crtc_vblank_on(crtc);
 +
        intel_crtc_enable_planes(crtc);
  }
  
@@@ -4216,9 -4330,6 +4313,9 @@@ static void haswell_crtc_enable(struct 
                intel_opregion_notify_encoder(encoder, true);
        }
  
 +      assert_vblank_disabled(crtc);
 +      drm_crtc_vblank_on(crtc);
 +
        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        haswell_mode_set_planes_workaround(intel_crtc);
@@@ -4254,9 -4365,6 +4351,9 @@@ static void ironlake_crtc_disable(struc
  
        intel_crtc_disable_planes(crtc);
  
 +      drm_crtc_vblank_off(crtc);
 +      assert_vblank_disabled(crtc);
 +
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->disable(encoder);
  
@@@ -4319,9 -4427,6 +4416,9 @@@ static void haswell_crtc_disable(struc
  
        intel_crtc_disable_planes(crtc);
  
 +      drm_crtc_vblank_off(crtc);
 +      assert_vblank_disabled(crtc);
 +
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                intel_opregion_notify_encoder(encoder, false);
                encoder->disable(encoder);
@@@ -4788,9 -4893,6 +4885,9 @@@ static void valleyview_crtc_enable(stru
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);
  
 +      assert_vblank_disabled(crtc);
 +      drm_crtc_vblank_on(crtc);
 +
        intel_crtc_enable_planes(crtc);
  
        /* Underruns don't raise interrupts, so check manually. */
@@@ -4848,9 -4950,6 +4945,9 @@@ static void i9xx_crtc_enable(struct drm
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);
  
 +      assert_vblank_disabled(crtc);
 +      drm_crtc_vblank_on(crtc);
 +
        intel_crtc_enable_planes(crtc);
  
        /*
@@@ -4914,6 -5013,9 +5011,6 @@@ static void i9xx_crtc_disable(struct dr
        intel_set_memory_cxsr(dev_priv, false);
        intel_crtc_disable_planes(crtc);
  
 -      for_each_encoder_on_crtc(dev, crtc, encoder)
 -              encoder->disable(encoder);
 -
        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
         */
        intel_wait_for_vblank(dev, pipe);
  
 +      drm_crtc_vblank_off(crtc);
 +      assert_vblank_disabled(crtc);
 +
 +      for_each_encoder_on_crtc(dev, crtc, encoder)
 +              encoder->disable(encoder);
 +
        intel_disable_pipe(intel_crtc);
  
        i9xx_pfit_disable(intel_crtc);
                else if (IS_VALLEYVIEW(dev))
                        vlv_disable_pll(dev_priv, pipe);
                else
 -                      i9xx_disable_pll(dev_priv, pipe);
 +                      i9xx_disable_pll(intel_crtc);
        }
  
        if (!IS_GEN2(dev))
@@@ -5946,7 -6042,7 +6043,7 @@@ static void i8xx_update_pll(struct inte
                        dpll |= PLL_P2_DIVIDE_BY_4;
        }
  
 -      if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
 +      if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
                dpll |= DPLL_DVO_2X_MODE;
  
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
@@@ -6452,14 -6548,6 +6549,14 @@@ static bool i9xx_get_pipe_config(struc
        }
        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev)) {
 +              /*
 +               * DPLL_DVO_2X_MODE must be enabled for both DPLLs
 +               * on 830. Filter it out here so that we don't
 +               * report errors due to that.
 +               */
 +              if (IS_I830(dev))
 +                      pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
 +
                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
        } else {
@@@ -6983,7 -7071,7 +7080,7 @@@ static void haswell_set_pipeconf(struc
        I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
        POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
  
-       if (IS_BROADWELL(dev)) {
+       if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                val = 0;
  
                switch (intel_crtc->config.pipe_bpp) {
@@@ -7600,6 -7688,7 +7697,6 @@@ static void hsw_disable_lcpll(struct dr
  static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
  {
        uint32_t val;
 -      unsigned long irqflags;
  
        val = I915_READ(LCPLL_CTL);
  
         * to call special forcewake code that doesn't touch runtime PM and
         * doesn't enable the forcewake delayed work.
         */
 -      spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 +      spin_lock_irq(&dev_priv->uncore.lock);
        if (dev_priv->uncore.forcewake_count++ == 0)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
 -      spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 +      spin_unlock_irq(&dev_priv->uncore.lock);
  
        if (val & LCPLL_POWER_DOWN_ALLOW) {
                val &= ~LCPLL_POWER_DOWN_ALLOW;
        }
  
        /* See the big comment above. */
 -      spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 +      spin_lock_irq(&dev_priv->uncore.lock);
        if (--dev_priv->uncore.forcewake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
 -      spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 +      spin_unlock_irq(&dev_priv->uncore.lock);
  }
  
  /*
@@@ -7785,7 -7874,8 +7882,8 @@@ static void haswell_get_ddi_port_state(
         * DDI E. So just check whether this pipe is wired to DDI E and whether
         * the PCH transcoder is on.
         */
-       if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+       if (INTEL_INFO(dev)->gen < 9 &&
+           (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
                pipe_config->has_pch_encoder = true;
  
                tmp = I915_READ(FDI_RX_CTL(PIPE_A));
@@@ -8211,10 -8301,8 +8309,10 @@@ static void i845_update_cursor(struct d
                intel_crtc->cursor_cntl = 0;
        }
  
 -      if (intel_crtc->cursor_base != base)
 +      if (intel_crtc->cursor_base != base) {
                I915_WRITE(_CURABASE, base);
 +              intel_crtc->cursor_base = base;
 +      }
  
        if (intel_crtc->cursor_size != size) {
                I915_WRITE(CURSIZE, size);
@@@ -8254,10 -8342,9 +8352,10 @@@ static void i9xx_update_cursor(struct d
                                return;
                }
                cntl |= pipe << 28; /* Connect to correct pipe */
 +
 +              if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 +                      cntl |= CURSOR_PIPE_CSC_ENABLE;
        }
 -      if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 -              cntl |= CURSOR_PIPE_CSC_ENABLE;
  
        if (intel_crtc->cursor_cntl != cntl) {
                I915_WRITE(CURCNTR(pipe), cntl);
        /* and commit changes on next vblank */
        I915_WRITE(CURBASE(pipe), base);
        POSTING_READ(CURBASE(pipe));
 +
 +      intel_crtc->cursor_base = base;
  }
  
  /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@@ -8320,6 -8405,7 +8418,6 @@@ static void intel_crtc_update_cursor(st
                i845_update_cursor(crtc, base);
        else
                i9xx_update_cursor(crtc, base);
 -      intel_crtc->cursor_base = base;
  }
  
  static bool cursor_size_ok(struct drm_device *dev,
@@@ -8983,6 -9069,35 +9081,6 @@@ struct drm_display_mode *intel_crtc_mod
        return mode;
  }
  
 -static void intel_increase_pllclock(struct drm_device *dev,
 -                                  enum pipe pipe)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      int dpll_reg = DPLL(pipe);
 -      int dpll;
 -
 -      if (!HAS_GMCH_DISPLAY(dev))
 -              return;
 -
 -      if (!dev_priv->lvds_downclock_avail)
 -              return;
 -
 -      dpll = I915_READ(dpll_reg);
 -      if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
 -              DRM_DEBUG_DRIVER("upclocking LVDS\n");
 -
 -              assert_panel_unlocked(dev_priv, pipe);
 -
 -              dpll &= ~DISPLAY_RATE_SELECT_FPA1;
 -              I915_WRITE(dpll_reg, dpll);
 -              intel_wait_for_vblank(dev, pipe);
 -
 -              dpll = I915_READ(dpll_reg);
 -              if (dpll & DISPLAY_RATE_SELECT_FPA1)
 -                      DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
 -      }
 -}
 -
  static void intel_decrease_pllclock(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
        intel_runtime_pm_put(dev_priv);
  }
  
 -
 -/**
 - * intel_mark_fb_busy - mark given planes as busy
 - * @dev: DRM device
 - * @frontbuffer_bits: bits for the affected planes
 - * @ring: optional ring for asynchronous commands
 - *
 - * This function gets called every time the screen contents change. It can be
 - * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
 - */
 -static void intel_mark_fb_busy(struct drm_device *dev,
 -                             unsigned frontbuffer_bits,
 -                             struct intel_engine_cs *ring)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      enum pipe pipe;
 -
 -      if (!i915.powersave)
 -              return;
 -
 -      for_each_pipe(dev_priv, pipe) {
 -              if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
 -                      continue;
 -
 -              intel_increase_pllclock(dev, pipe);
 -              if (ring && intel_fbc_enabled(dev))
 -                      ring->fbc_dirty = true;
 -      }
 -}
 -
 -/**
 - * intel_fb_obj_invalidate - invalidate frontbuffer object
 - * @obj: GEM object to invalidate
 - * @ring: set for asynchronous rendering
 - *
 - * This function gets called every time rendering on the given object starts and
 - * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
 - * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
 - * until the rendering completes or a flip on this frontbuffer plane is
 - * scheduled.
 - */
 -void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 -                           struct intel_engine_cs *ring)
 -{
 -      struct drm_device *dev = obj->base.dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 -
 -      if (!obj->frontbuffer_bits)
 -              return;
 -
 -      if (ring) {
 -              mutex_lock(&dev_priv->fb_tracking.lock);
 -              dev_priv->fb_tracking.busy_bits
 -                      |= obj->frontbuffer_bits;
 -              dev_priv->fb_tracking.flip_bits
 -                      &= ~obj->frontbuffer_bits;
 -              mutex_unlock(&dev_priv->fb_tracking.lock);
 -      }
 -
 -      intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
 -
 -      intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
 -}
 -
 -/**
 - * intel_frontbuffer_flush - flush frontbuffer
 - * @dev: DRM device
 - * @frontbuffer_bits: frontbuffer plane tracking bits
 - *
 - * This function gets called every time rendering on the given planes has
 - * completed and frontbuffer caching can be started again. Flushes will get
 - * delayed if they're blocked by some oustanding asynchronous rendering.
 - *
 - * Can be called without any locks held.
 - */
 -void intel_frontbuffer_flush(struct drm_device *dev,
 -                           unsigned frontbuffer_bits)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      /* Delay flushing when rings are still busy.*/
 -      mutex_lock(&dev_priv->fb_tracking.lock);
 -      frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
 -      mutex_unlock(&dev_priv->fb_tracking.lock);
 -
 -      intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
 -
 -      intel_edp_psr_flush(dev, frontbuffer_bits);
 -
 -      /*
 -       * FIXME: Unconditional fbc flushing here is a rather gross hack and
 -       * needs to be reworked into a proper frontbuffer tracking scheme like
 -       * psr employs.
 -       */
 -      if (IS_BROADWELL(dev))
 -              gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
 -}
 -
 -/**
 - * intel_fb_obj_flush - flush frontbuffer object
 - * @obj: GEM object to flush
 - * @retire: set when retiring asynchronous rendering
 - *
 - * This function gets called every time rendering on the given object has
 - * completed and frontbuffer caching can be started again. If @retire is true
 - * then any delayed flushes will be unblocked.
 - */
 -void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 -                      bool retire)
 -{
 -      struct drm_device *dev = obj->base.dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      unsigned frontbuffer_bits;
 -
 -      WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 -
 -      if (!obj->frontbuffer_bits)
 -              return;
 -
 -      frontbuffer_bits = obj->frontbuffer_bits;
 -
 -      if (retire) {
 -              mutex_lock(&dev_priv->fb_tracking.lock);
 -              /* Filter out new bits since rendering started. */
 -              frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
 -
 -              dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
 -              mutex_unlock(&dev_priv->fb_tracking.lock);
 -      }
 -
 -      intel_frontbuffer_flush(dev, frontbuffer_bits);
 -}
 -
 -/**
 - * intel_frontbuffer_flip_prepare - prepare asnychronous frontbuffer flip
 - * @dev: DRM device
 - * @frontbuffer_bits: frontbuffer plane tracking bits
 - *
 - * This function gets called after scheduling a flip on @obj. The actual
 - * frontbuffer flushing will be delayed until completion is signalled with
 - * intel_frontbuffer_flip_complete. If an invalidate happens in between this
 - * flush will be cancelled.
 - *
 - * Can be called without any locks held.
 - */
 -void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 -                                  unsigned frontbuffer_bits)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      mutex_lock(&dev_priv->fb_tracking.lock);
 -      dev_priv->fb_tracking.flip_bits
 -              |= frontbuffer_bits;
 -      mutex_unlock(&dev_priv->fb_tracking.lock);
 -}
 -
 -/**
 - * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
 - * @dev: DRM device
 - * @frontbuffer_bits: frontbuffer plane tracking bits
 - *
 - * This function gets called after the flip has been latched and will complete
 - * on the next vblank. It will execute the fush if it hasn't been cancalled yet.
 - *
 - * Can be called without any locks held.
 - */
 -void intel_frontbuffer_flip_complete(struct drm_device *dev,
 -                                   unsigned frontbuffer_bits)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      mutex_lock(&dev_priv->fb_tracking.lock);
 -      /* Mask any cancelled flips. */
 -      frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
 -      dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
 -      mutex_unlock(&dev_priv->fb_tracking.lock);
 -
 -      intel_frontbuffer_flush(dev, frontbuffer_bits);
 -}
 -
  static void intel_crtc_destroy(struct drm_crtc *crtc)
  {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct intel_unpin_work *work;
 -      unsigned long flags;
  
 -      spin_lock_irqsave(&dev->event_lock, flags);
 +      spin_lock_irq(&dev->event_lock);
        work = intel_crtc->unpin_work;
        intel_crtc->unpin_work = NULL;
 -      spin_unlock_irqrestore(&dev->event_lock, flags);
 +      spin_unlock_irq(&dev->event_lock);
  
        if (work) {
                cancel_work_sync(&work->work);
@@@ -9113,10 -9411,6 +9211,10 @@@ static void do_intel_finish_page_flip(s
        if (intel_crtc == NULL)
                return;
  
 +      /*
 +       * This is called both by irq handlers and the reset code (to complete
 +       * lost pageflips) so needs the full irqsave spinlocks.
 +       */
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
  
@@@ -9198,12 -9492,7 +9296,12 @@@ void intel_prepare_page_flip(struct drm
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;
  
 -      /* NB: An MMIO update of the plane base pointer will also
 +
 +      /*
 +       * This is called both by irq handlers and the reset code (to complete
 +       * lost pageflips) so needs the full irqsave spinlocks.
 +       *
 +       * NB: An MMIO update of the plane base pointer will also
         * generate a page-flip completion irq, i.e. every modeset
         * is also accompanied by a spurious intel_prepare_page_flip().
         */
@@@ -9578,6 -9867,7 +9676,6 @@@ static int intel_queue_mmio_flip(struc
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      unsigned long irq_flags;
        int ret;
  
        if (WARN_ON(intel_crtc->mmio_flip.seqno))
                return 0;
        }
  
 -      spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
 +      spin_lock_irq(&dev_priv->mmio_flip_lock);
        intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
        intel_crtc->mmio_flip.ring_id = obj->ring->id;
 -      spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
 +      spin_unlock_irq(&dev_priv->mmio_flip_lock);
  
        /*
         * Double check to catch cases where irq fired before
@@@ -9659,19 -9949,18 +9757,19 @@@ void intel_check_page_flip(struct drm_d
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      unsigned long flags;
 +
 +      WARN_ON(!in_irq());
  
        if (crtc == NULL)
                return;
  
 -      spin_lock_irqsave(&dev->event_lock, flags);
 +      spin_lock(&dev->event_lock);
        if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
                WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
                         intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
                page_flip_completed(intel_crtc);
        }
 -      spin_unlock_irqrestore(&dev->event_lock, flags);
 +      spin_unlock(&dev->event_lock);
  }
  
  static int intel_crtc_page_flip(struct drm_crtc *crtc,
        enum pipe pipe = intel_crtc->pipe;
        struct intel_unpin_work *work;
        struct intel_engine_cs *ring;
 -      unsigned long flags;
        int ret;
  
        //trigger software GT busyness calculation
                goto free_work;
  
        /* We borrow the event spin lock for protecting unpin_work */
 -      spin_lock_irqsave(&dev->event_lock, flags);
 +      spin_lock_irq(&dev->event_lock);
        if (intel_crtc->unpin_work) {
                /* Before declaring the flip queue wedged, check if
                 * the hardware completed the operation behind our backs.
                        page_flip_completed(intel_crtc);
                } else {
                        DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 -                      spin_unlock_irqrestore(&dev->event_lock, flags);
 +                      spin_unlock_irq(&dev->event_lock);
  
                        drm_crtc_vblank_put(crtc);
                        kfree(work);
                }
        }
        intel_crtc->unpin_work = work;
 -      spin_unlock_irqrestore(&dev->event_lock, flags);
 +      spin_unlock_irq(&dev->event_lock);
  
        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
                flush_workqueue(dev_priv->wq);
@@@ -9835,9 -10125,9 +9933,9 @@@ cleanup_pending
        mutex_unlock(&dev->struct_mutex);
  
  cleanup:
 -      spin_lock_irqsave(&dev->event_lock, flags);
 +      spin_lock_irq(&dev->event_lock);
        intel_crtc->unpin_work = NULL;
 -      spin_unlock_irqrestore(&dev->event_lock, flags);
 +      spin_unlock_irq(&dev->event_lock);
  
        drm_crtc_vblank_put(crtc);
  free_work:
  out_hang:
                intel_crtc_wait_for_pending_flips(crtc);
                ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
 -              if (ret == 0 && event)
 +              if (ret == 0 && event) {
 +                      spin_lock_irq(&dev->event_lock);
                        drm_send_vblank_event(dev, pipe, event);
 +                      spin_unlock_irq(&dev->event_lock);
 +              }
        }
        return ret;
  }
@@@ -11570,37 -11857,89 +11668,37 @@@ disable_unpin
  }
  
  static int
 -intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
 -                           struct drm_framebuffer *fb, int crtc_x, int crtc_y,
 -                           unsigned int crtc_w, unsigned int crtc_h,
 -                           uint32_t src_x, uint32_t src_y,
 -                           uint32_t src_w, uint32_t src_h)
 +intel_check_primary_plane(struct drm_plane *plane,
 +                        struct intel_plane_state *state)
  {
 +      struct drm_crtc *crtc = state->crtc;
 +      struct drm_framebuffer *fb = state->fb;
 +      struct drm_rect *dest = &state->dst;
 +      struct drm_rect *src = &state->src;
 +      const struct drm_rect *clip = &state->clip;
 +
 +      return drm_plane_helper_check_update(plane, crtc, fb,
 +                                          src, dest, clip,
 +                                          DRM_PLANE_HELPER_NO_SCALING,
 +                                          DRM_PLANE_HELPER_NO_SCALING,
 +                                          false, true, &state->visible);
 +}
 +
 +static int
 +intel_commit_primary_plane(struct drm_plane *plane,
 +                         struct intel_plane_state *state)
 +{
 +      struct drm_crtc *crtc = state->crtc;
 +      struct drm_framebuffer *fb = state->fb;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
 -      struct drm_rect dest = {
 -              /* integer pixels */
 -              .x1 = crtc_x,
 -              .y1 = crtc_y,
 -              .x2 = crtc_x + crtc_w,
 -              .y2 = crtc_y + crtc_h,
 -      };
 -      struct drm_rect src = {
 -              /* 16.16 fixed point */
 -              .x1 = src_x,
 -              .y1 = src_y,
 -              .x2 = src_x + src_w,
 -              .y2 = src_y + src_h,
 -      };
 -      const struct drm_rect clip = {
 -              /* integer pixels */
 -              .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
 -              .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
 -      };
 -      const struct {
 -              int crtc_x, crtc_y;
 -              unsigned int crtc_w, crtc_h;
 -              uint32_t src_x, src_y, src_w, src_h;
 -      } orig = {
 -              .crtc_x = crtc_x,
 -              .crtc_y = crtc_y,
 -              .crtc_w = crtc_w,
 -              .crtc_h = crtc_h,
 -              .src_x = src_x,
 -              .src_y = src_y,
 -              .src_w = src_w,
 -              .src_h = src_h,
 -      };
        struct intel_plane *intel_plane = to_intel_plane(plane);
 -      bool visible;
 +      struct drm_rect *src = &state->src;
        int ret;
  
 -      ret = drm_plane_helper_check_update(plane, crtc, fb,
 -                                          &src, &dest, &clip,
 -                                          DRM_PLANE_HELPER_NO_SCALING,
 -                                          DRM_PLANE_HELPER_NO_SCALING,
 -                                          false, true, &visible);
 -
 -      if (ret)
 -              return ret;
 -
 -      /*
 -       * If the CRTC isn't enabled, we're just pinning the framebuffer,
 -       * updating the fb pointer, and returning without touching the
 -       * hardware.  This allows us to later do a drmModeSetCrtc with fb=-1 to
 -       * turn on the display with all planes setup as desired.
 -       */
 -      if (!crtc->enabled) {
 -              mutex_lock(&dev->struct_mutex);
 -
 -              /*
 -               * If we already called setplane while the crtc was disabled,
 -               * we may have an fb pinned; unpin it.
 -               */
 -              if (plane->fb)
 -                      intel_unpin_fb_obj(old_obj);
 -
 -              i915_gem_track_fb(old_obj, obj,
 -                                INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
 -
 -              /* Pin and return without programming hardware */
 -              ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 -              mutex_unlock(&dev->struct_mutex);
 -
 -              return ret;
 -      }
 -
        intel_crtc_wait_for_pending_flips(crtc);
  
        /*
         * happens if userspace explicitly disables the plane by passing fb=0
         * because plane->fb still gets set and pinned.
         */
 -      if (!visible) {
 +      if (!state->visible) {
                mutex_lock(&dev->struct_mutex);
  
                /*
                                intel_disable_fbc(dev);
                        }
                }
 -              ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
 +              ret = intel_pipe_set_base(crtc, src->x1, src->y1, fb);
                if (ret)
                        return ret;
  
                        intel_enable_primary_hw_plane(plane, crtc);
        }
  
 -      intel_plane->crtc_x = orig.crtc_x;
 -      intel_plane->crtc_y = orig.crtc_y;
 -      intel_plane->crtc_w = orig.crtc_w;
 -      intel_plane->crtc_h = orig.crtc_h;
 -      intel_plane->src_x = orig.src_x;
 -      intel_plane->src_y = orig.src_y;
 -      intel_plane->src_w = orig.src_w;
 -      intel_plane->src_h = orig.src_h;
 +      intel_plane->crtc_x = state->orig_dst.x1;
 +      intel_plane->crtc_y = state->orig_dst.y1;
 +      intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
 +      intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
 +      intel_plane->src_x = state->orig_src.x1;
 +      intel_plane->src_y = state->orig_src.y1;
 +      intel_plane->src_w = drm_rect_width(&state->orig_src);
 +      intel_plane->src_h = drm_rect_height(&state->orig_src);
        intel_plane->obj = obj;
  
        return 0;
  }
  
 +static int
 +intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
 +                           struct drm_framebuffer *fb, int crtc_x, int crtc_y,
 +                           unsigned int crtc_w, unsigned int crtc_h,
 +                           uint32_t src_x, uint32_t src_y,
 +                           uint32_t src_w, uint32_t src_h)
 +{
 +      struct intel_plane_state state;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int ret;
 +
 +      state.crtc = crtc;
 +      state.fb = fb;
 +
 +      /* sample coordinates in 16.16 fixed point */
 +      state.src.x1 = src_x;
 +      state.src.x2 = src_x + src_w;
 +      state.src.y1 = src_y;
 +      state.src.y2 = src_y + src_h;
 +
 +      /* integer pixels */
 +      state.dst.x1 = crtc_x;
 +      state.dst.x2 = crtc_x + crtc_w;
 +      state.dst.y1 = crtc_y;
 +      state.dst.y2 = crtc_y + crtc_h;
 +
 +      state.clip.x1 = 0;
 +      state.clip.y1 = 0;
 +      state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
 +      state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
 +
 +      state.orig_src = state.src;
 +      state.orig_dst = state.dst;
 +
 +      ret = intel_check_primary_plane(plane, &state);
 +      if (ret)
 +              return ret;
 +
 +      intel_commit_primary_plane(plane, &state);
 +
 +      return 0;
 +}
 +
  /* Common destruction function for both primary and cursor planes */
  static void intel_plane_destroy(struct drm_plane *plane)
  {
@@@ -11794,41 -12090,51 +11892,41 @@@ intel_cursor_plane_disable(struct drm_p
  }
  
  static int
 -intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 -                        struct drm_framebuffer *fb, int crtc_x, int crtc_y,
 -                        unsigned int crtc_w, unsigned int crtc_h,
 -                        uint32_t src_x, uint32_t src_y,
 -                        uint32_t src_w, uint32_t src_h)
 +intel_check_cursor_plane(struct drm_plane *plane,
 +                       struct intel_plane_state *state)
  {
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 -      struct drm_i915_gem_object *obj = intel_fb->obj;
 -      struct drm_rect dest = {
 -              /* integer pixels */
 -              .x1 = crtc_x,
 -              .y1 = crtc_y,
 -              .x2 = crtc_x + crtc_w,
 -              .y2 = crtc_y + crtc_h,
 -      };
 -      struct drm_rect src = {
 -              /* 16.16 fixed point */
 -              .x1 = src_x,
 -              .y1 = src_y,
 -              .x2 = src_x + src_w,
 -              .y2 = src_y + src_h,
 -      };
 -      const struct drm_rect clip = {
 -              /* integer pixels */
 -              .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
 -              .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
 -      };
 -      bool visible;
 -      int ret;
 +      struct drm_crtc *crtc = state->crtc;
 +      struct drm_framebuffer *fb = state->fb;
 +      struct drm_rect *dest = &state->dst;
 +      struct drm_rect *src = &state->src;
 +      const struct drm_rect *clip = &state->clip;
  
 -      ret = drm_plane_helper_check_update(plane, crtc, fb,
 -                                          &src, &dest, &clip,
 +      return drm_plane_helper_check_update(plane, crtc, fb,
 +                                          src, dest, clip,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
 -                                          true, true, &visible);
 -      if (ret)
 -              return ret;
 +                                          true, true, &state->visible);
 +}
 +
 +static int
 +intel_commit_cursor_plane(struct drm_plane *plane,
 +                        struct intel_plane_state *state)
 +{
 +      struct drm_crtc *crtc = state->crtc;
 +      struct drm_framebuffer *fb = state->fb;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 +      struct drm_i915_gem_object *obj = intel_fb->obj;
 +      int crtc_w, crtc_h;
  
 -      crtc->cursor_x = crtc_x;
 -      crtc->cursor_y = crtc_y;
 +      crtc->cursor_x = state->orig_dst.x1;
 +      crtc->cursor_y = state->orig_dst.y1;
        if (fb != crtc->cursor->fb) {
 +              crtc_w = drm_rect_width(&state->orig_dst);
 +              crtc_h = drm_rect_height(&state->orig_dst);
                return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
        } else {
 -              intel_crtc_update_cursor(crtc, visible);
 +              intel_crtc_update_cursor(crtc, state->visible);
  
                intel_frontbuffer_flip(crtc->dev,
                                       INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
                return 0;
        }
  }
 +
 +static int
 +intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 +                        struct drm_framebuffer *fb, int crtc_x, int crtc_y,
 +                        unsigned int crtc_w, unsigned int crtc_h,
 +                        uint32_t src_x, uint32_t src_y,
 +                        uint32_t src_w, uint32_t src_h)
 +{
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      struct intel_plane_state state;
 +      int ret;
 +
 +      state.crtc = crtc;
 +      state.fb = fb;
 +
 +      /* sample coordinates in 16.16 fixed point */
 +      state.src.x1 = src_x;
 +      state.src.x2 = src_x + src_w;
 +      state.src.y1 = src_y;
 +      state.src.y2 = src_y + src_h;
 +
 +      /* integer pixels */
 +      state.dst.x1 = crtc_x;
 +      state.dst.x2 = crtc_x + crtc_w;
 +      state.dst.y1 = crtc_y;
 +      state.dst.y2 = crtc_y + crtc_h;
 +
 +      state.clip.x1 = 0;
 +      state.clip.y1 = 0;
 +      state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
 +      state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
 +
 +      state.orig_src = state.src;
 +      state.orig_dst = state.dst;
 +
 +      ret = intel_check_cursor_plane(plane, &state);
 +      if (ret)
 +              return ret;
 +
 +      return intel_commit_cursor_plane(plane, &state);
 +}
 +
  static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .update_plane = intel_cursor_plane_update,
        .disable_plane = intel_cursor_plane_disable,
@@@ -12066,6 -12330,9 +12164,9 @@@ static bool intel_crt_present(struct dr
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
+       if (INTEL_INFO(dev)->gen >= 9)
+               return false;
        if (IS_ULT(dev))
                return false;
  
@@@ -12409,8 -12676,12 +12510,12 @@@ static void intel_init_display(struct d
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
                dev_priv->display.off = ironlake_crtc_off;
-               dev_priv->display.update_primary_plane =
-                       ironlake_update_primary_plane;
+               if (INTEL_INFO(dev)->gen >= 9)
+                       dev_priv->display.update_primary_plane =
+                               skylake_update_primary_plane;
+               else
+                       dev_priv->display.update_primary_plane =
+                               ironlake_update_primary_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
                dev_priv->display.get_plane_config = ironlake_get_plane_config;
                dev_priv->display.modeset_global_resources =
                        valleyview_modeset_global_resources;
                dev_priv->display.write_eld = ironlake_write_eld;
+       } else if (INTEL_INFO(dev)->gen >= 9) {
+               dev_priv->display.write_eld = haswell_write_eld;
+               dev_priv->display.modeset_global_resources =
+                       haswell_modeset_global_resources;
        }
  
        /* Default just returns -ENODEV to indicate unsupported */
@@@ -13559,8 -13834,9 +13668,8 @@@ void intel_modeset_preclose(struct drm_
  
        for_each_intel_crtc(dev, crtc) {
                struct intel_unpin_work *work;
 -              unsigned long irqflags;
  
 -              spin_lock_irqsave(&dev->event_lock, irqflags);
 +              spin_lock_irq(&dev->event_lock);
  
                work = crtc->unpin_work;
  
                        work->event = NULL;
                }
  
 -              spin_unlock_irqrestore(&dev->event_lock, irqflags);
 +              spin_unlock_irq(&dev->event_lock);
        }
  }
index 342d624f8312599f4d7df2f430aada8c2ff24d16,2e9a6ab1e36dc319cfcd1c53a672f7949127c482..799918f7822ce27adc5e6d5b8d5fcba8c84801fa
@@@ -661,6 -661,16 +661,16 @@@ static uint32_t vlv_get_aux_clock_divid
        return index ? 0 : 100;
  }
  
+ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+ {
+       /*
+        * SKL doesn't need us to program the AUX clock divider (Hardware will
+        * derive the clock from CDCLK automatically). We still implement the
+        * get_aux_clock_divider vfunc to plug-in into the existing code.
+        */
+       return index ? 0 : 1;
+ }
  static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
  }
  
+ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+                                     bool has_aux_irq,
+                                     int send_bytes,
+                                     uint32_t unused)
+ {
+       return DP_AUX_CH_CTL_SEND_BUSY |
+              DP_AUX_CH_CTL_DONE |
+              (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+              DP_AUX_CH_CTL_TIME_OUT_ERROR |
+              DP_AUX_CH_CTL_TIME_OUT_1600us |
+              DP_AUX_CH_CTL_RECEIVE_ERROR |
+              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+ }
  static int
  intel_dp_aux_ch(struct intel_dp *intel_dp,
                uint8_t *send, int send_bytes,
@@@ -925,7 -950,16 +950,16 @@@ intel_dp_aux_init(struct intel_dp *inte
                BUG();
        }
  
-       if (!HAS_DDI(dev))
+       /*
+        * The AUX_CTL register is usually DP_CTL + 0x10.
+        *
+        * On Haswell and Broadwell though:
+        *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
+        *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
+        *
+        * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
+        */
+       if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
                intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
  
        intel_dp->aux.name = name;
@@@ -1068,15 -1102,23 +1102,15 @@@ intel_dp_compute_config(struct intel_en
                        bpp = dev_priv->vbt.edp_bpp;
                }
  
 -              if (IS_BROADWELL(dev)) {
 -                      /* Yes, it's an ugly hack. */
 -                      min_lane_count = max_lane_count;
 -                      DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
 -                                    min_lane_count);
 -              } else if (dev_priv->vbt.edp_lanes) {
 -                      min_lane_count = min(dev_priv->vbt.edp_lanes,
 -                                           max_lane_count);
 -                      DRM_DEBUG_KMS("using min %u lanes per VBT\n",
 -                                    min_lane_count);
 -              }
 -
 -              if (dev_priv->vbt.edp_rate) {
 -                      min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
 -                      DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
 -                                    bws[min_clock]);
 -              }
 +              /*
 +               * Use the maximum clock and number of lanes the eDP panel
 +               * advertizes being capable of. The panels are generally
 +               * designed to support only a single clock and lane
 +               * configuration, and typically these values correspond to the
 +               * native resolution of the panel.
 +               */
 +              min_lane_count = max_lane_count;
 +              min_clock = max_clock;
        }
  
        for (; bpp >= 6*3; bpp -= 2*3) {
@@@ -1991,8 -2033,10 +2025,8 @@@ static void intel_edp_psr_write_vsc(str
        POSTING_READ(ctl_reg);
  }
  
 -static void intel_edp_psr_setup(struct intel_dp *intel_dp)
 +static void intel_edp_psr_setup_vsc(struct intel_dp *intel_dp)
  {
 -      struct drm_device *dev = intel_dp_to_dev(intel_dp);
 -      struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_vsc_psr psr_vsc;
  
        /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
        psr_vsc.sdp_header.HB2 = 0x2;
        psr_vsc.sdp_header.HB3 = 0x8;
        intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
 -
 -      /* Avoid continuous PSR exit by masking memup and hpd */
 -      I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
 -                 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
  }
  
  static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
@@@ -2121,7 -2169,10 +2155,7 @@@ static void intel_edp_psr_do_enable(str
        WARN_ON(dev_priv->psr.active);
        lockdep_assert_held(&dev_priv->psr.lock);
  
 -      /* Enable PSR on the panel */
 -      intel_edp_psr_enable_sink(intel_dp);
 -
 -      /* Enable PSR on the host */
 +      /* Enable/Re-enable PSR on the host */
        intel_edp_psr_enable_source(intel_dp);
  
        dev_priv->psr.active = true;
@@@ -2145,25 -2196,17 +2179,25 @@@ void intel_edp_psr_enable(struct intel_
        mutex_lock(&dev_priv->psr.lock);
        if (dev_priv->psr.enabled) {
                DRM_DEBUG_KMS("PSR already in use\n");
 -              mutex_unlock(&dev_priv->psr.lock);
 -              return;
 +              goto unlock;
        }
  
 +      if (!intel_edp_psr_match_conditions(intel_dp))
 +              goto unlock;
 +
        dev_priv->psr.busy_frontbuffer_bits = 0;
  
 -      /* Setup PSR once */
 -      intel_edp_psr_setup(intel_dp);
 +      intel_edp_psr_setup_vsc(intel_dp);
  
 -      if (intel_edp_psr_match_conditions(intel_dp))
 -              dev_priv->psr.enabled = intel_dp;
 +      /* Avoid continuous PSR exit by masking memup and hpd */
 +      I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
 +                 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 +
 +      /* Enable PSR on the panel */
 +      intel_edp_psr_enable_sink(intel_dp);
 +
 +      dev_priv->psr.enabled = intel_dp;
 +unlock:
        mutex_unlock(&dev_priv->psr.lock);
  }
  
@@@ -2204,17 -2247,6 +2238,17 @@@ static void intel_edp_psr_work(struct w
                container_of(work, typeof(*dev_priv), psr.work.work);
        struct intel_dp *intel_dp = dev_priv->psr.enabled;
  
 +      /* We have to make sure PSR is ready for re-enable
 +       * otherwise it keeps disabled until next full enable/disable cycle.
 +       * PSR might take some time to get fully disabled
 +       * and be ready for re-enable.
 +       */
 +      if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
 +                    EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
 +              DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
 +              return;
 +      }
 +
        mutex_lock(&dev_priv->psr.lock);
        intel_dp = dev_priv->psr.enabled;
  
@@@ -2842,7 -2874,9 +2876,9 @@@ intel_dp_voltage_max(struct intel_dp *i
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;
  
-       if (IS_VALLEYVIEW(dev))
+       if (INTEL_INFO(dev)->gen >= 9)
+               return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+       else if (IS_VALLEYVIEW(dev))
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
        else if (IS_GEN7(dev) && port == PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@@ -2858,7 -2892,18 +2894,18 @@@ intel_dp_pre_emphasis_max(struct intel_
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;
  
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       if (INTEL_INFO(dev)->gen >= 9) {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_3;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_1;
+               default:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
+               }
+       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@@ -3340,7 -3385,7 +3387,7 @@@ intel_dp_set_signal_levels(struct intel
        uint32_t signal_levels, mask;
        uint8_t train_set = intel_dp->train_set[0];
  
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                signal_levels = intel_hsw_signal_levels(train_set);
                mask = DDI_BUF_EMP_MASK;
        } else if (IS_CHERRYVIEW(dev)) {
@@@ -3734,7 -3779,7 +3781,7 @@@ intel_dp_get_dpcd(struct intel_dp *inte
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
            intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
                intel_dp->use_tps3 = true;
 -              DRM_DEBUG_KMS("Displayport TPS3 supported");
 +              DRM_DEBUG_KMS("Displayport TPS3 supported\n");
        } else
                intel_dp->use_tps3 = false;
  
@@@ -3807,41 -3852,26 +3854,41 @@@ int intel_dp_sink_crc(struct intel_dp *
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(intel_dig_port->base.base.crtc);
 -      u8 buf[1];
 +      u8 buf;
 +      int test_crc_count;
 +      int attempts = 6;
  
 -      if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
 -              return -EAGAIN;
 +      if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
 +              return -EIO;
  
 -      if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
 +      if (!(buf & DP_TEST_CRC_SUPPORTED))
                return -ENOTTY;
  
 +      drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf);
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
 -                             DP_TEST_SINK_START) < 0)
 -              return -EAGAIN;
 +                              buf | DP_TEST_SINK_START) < 0)
 +              return -EIO;
 +
 +      drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf);
 +      test_crc_count = buf & DP_TEST_COUNT_MASK;
  
 -      /* Wait 2 vblanks to be sure we will have the correct CRC value */
 -      intel_wait_for_vblank(dev, intel_crtc->pipe);
 -      intel_wait_for_vblank(dev, intel_crtc->pipe);
 +      do {
 +              drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf);
 +              intel_wait_for_vblank(dev, intel_crtc->pipe);
 +      } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
 +
 +      if (attempts == 0) {
 +              DRM_ERROR("Panel is unable to calculate CRC after 6 vblanks\n");
 +              return -EIO;
 +      }
  
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
 -              return -EAGAIN;
 +              return -EIO;
 +
 +      drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf);
 +      drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
 +                      buf & ~DP_TEST_SINK_START);
  
 -      drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
        return 0;
  }
  
@@@ -4412,7 -4442,7 +4459,7 @@@ intel_dp_connector_destroy(struct drm_c
  {
        struct intel_connector *intel_connector = to_intel_connector(connector);
  
 -      intel_dp_unset_edid(intel_attached_dp(connector));
 +      kfree(intel_connector->detect_edid);
  
        if (!IS_ERR_OR_NULL(intel_connector->edid))
                kfree(intel_connector->edid);
@@@ -5078,7 -5108,9 +5125,9 @@@ intel_dp_init_connector(struct intel_di
        intel_dp->pps_pipe = INVALID_PIPE;
  
        /* intel_dp vfuncs */
-       if (IS_VALLEYVIEW(dev))
+       if (INTEL_INFO(dev)->gen >= 9)
+               intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+       else if (IS_VALLEYVIEW(dev))
                intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
        else
                intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
  
-       intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+       if (INTEL_INFO(dev)->gen >= 9)
+               intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+       else
+               intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
  
        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
index 6171267868191692e02a0c0bca44293dc20be89c,a09e4adda0bf8466641dbf197ce6264f2f97c6e9..072e69f4080e5d9c76584778376218d7fd3b044d
@@@ -34,7 -34,6 +34,7 @@@
  #include <drm/drm_crtc_helper.h>
  #include <drm/drm_fb_helper.h>
  #include <drm/drm_dp_mst_helper.h>
 +#include <drm/drm_rect.h>
  
  /**
   * _wait_for - magic (register) wait macro
@@@ -238,17 -237,6 +238,17 @@@ typedef struct dpll 
        int     p;
  } intel_clock_t;
  
 +struct intel_plane_state {
 +      struct drm_crtc *crtc;
 +      struct drm_framebuffer *fb;
 +      struct drm_rect src;
 +      struct drm_rect dst;
 +      struct drm_rect clip;
 +      struct drm_rect orig_src;
 +      struct drm_rect orig_dst;
 +      bool visible;
 +};
 +
  struct intel_plane_config {
        bool tiled;
        int size;
@@@ -743,6 -731,14 +743,14 @@@ hdmi_to_dig_port(struct intel_hdmi *int
        return container_of(intel_hdmi, struct intel_digital_port, hdmi);
  }
  
+ /*
+  * Returns the number of planes for this pipe, ie the number of sprites + 1
+  * (primary plane). This doesn't count the cursor plane then.
+  */
+ static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
+ {
+       return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
+ }
  
  /* i915_irq.c */
  bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
@@@ -801,7 -797,11 +809,7 @@@ void intel_ddi_clock_get(struct intel_e
                         struct intel_crtc_config *pipe_config);
  void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
  
 -/* intel_display.c */
 -const char *intel_output_name(int output);
 -bool intel_has_pending_fb_unpin(struct drm_device *dev);
 -int intel_pch_rawclk(struct drm_device *dev);
 -void intel_mark_busy(struct drm_device *dev);
 +/* intel_frontbuffer.c */
  void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
                             struct intel_engine_cs *ring);
  void intel_frontbuffer_flip_prepare(struct drm_device *dev,
@@@ -829,13 -829,6 +837,13 @@@ void intel_frontbuffer_flip(struct drm_
  }
  
  void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
 +
 +
 +/* intel_display.c */
 +const char *intel_output_name(int output);
 +bool intel_has_pending_fb_unpin(struct drm_device *dev);
 +int intel_pch_rawclk(struct drm_device *dev);
 +void intel_mark_busy(struct drm_device *dev);
  void intel_mark_idle(struct drm_device *dev);
  void intel_crtc_restore_mode(struct drm_crtc *crtc);
  void intel_crtc_control(struct drm_crtc *crtc, bool enable);
@@@ -856,11 -849,7 +864,11 @@@ int intel_get_pipe_from_crtc_id(struct 
                                struct drm_file *file_priv);
  enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
                                             enum pipe pipe);
 -void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 +static inline void
 +intel_wait_for_vblank(struct drm_device *dev, int pipe)
 +{
 +      drm_wait_one_vblank(dev, pipe);
 +}
  int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
  void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                         struct intel_digital_port *dport);
@@@ -894,8 -883,6 +902,8 @@@ struct intel_shared_dpll *intel_get_sha
  void intel_put_shared_dpll(struct intel_crtc *crtc);
  
  /* modesetting asserts */
 +void assert_panel_unlocked(struct drm_i915_private *dev_priv,
 +                         enum pipe pipe);
  void assert_pll(struct drm_i915_private *dev_priv,
                enum pipe pipe, bool state);
  #define assert_pll_enabled(d, p) assert_pll(d, p, true)
index f17ada3742de651325f96b3ac114d92b594d2b86,d69a3cb6b689cf15768f39d74767b995243cd421..543e0f17ee629c45898b54d4c19a67d7cee3e088
@@@ -538,13 -538,14 +538,13 @@@ static u32 intel_panel_get_backlight(st
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;
 -      unsigned long flags;
  
 -      spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 +      mutex_lock(&dev_priv->backlight_lock);
  
        val = dev_priv->display.get_backlight(connector);
        val = intel_panel_compute_brightness(connector, val);
  
 -      spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 +      mutex_unlock(&dev_priv->backlight_lock);
  
        DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
        return val;
@@@ -628,11 -629,12 +628,11 @@@ static void intel_panel_set_backlight(s
        struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 hw_level;
 -      unsigned long flags;
  
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
  
 -      spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 +      mutex_lock(&dev_priv->backlight_lock);
  
        WARN_ON(panel->backlight.max == 0);
  
        if (panel->backlight.enabled)
                intel_panel_actually_set_backlight(connector, hw_level);
  
 -      spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 +      mutex_unlock(&dev_priv->backlight_lock);
  }
  
  /* set backlight brightness to level in range [0..max], assuming hw min is
@@@ -656,11 -658,12 +656,11 @@@ void intel_panel_set_backlight_acpi(str
        struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 hw_level;
 -      unsigned long flags;
  
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
  
 -      spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 +      mutex_lock(&dev_priv->backlight_lock);
  
        WARN_ON(panel->backlight.max == 0);
  
        if (panel->backlight.enabled)
                intel_panel_actually_set_backlight(connector, hw_level);
  
 -      spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 +      mutex_unlock(&dev_priv->backlight_lock);
  }
  
  static void pch_disable_backlight(struct intel_connector *connector)
@@@ -730,6 -733,7 +730,6 @@@ void intel_panel_disable_backlight(stru
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
 -      unsigned long flags;
  
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
                return;
        }
  
 -      spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 +      mutex_lock(&dev_priv->backlight_lock);
  
        if (panel->backlight.device)
                panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
        panel->backlight.enabled = false;
        dev_priv->display.disable_backlight(connector);
  
 -      spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 +      mutex_unlock(&dev_priv->backlight_lock);
  }
  
  static void bdw_enable_backlight(struct intel_connector *connector)
@@@ -933,13 -937,14 +933,13 @@@ void intel_panel_enable_backlight(struc
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
 -      unsigned long flags;
  
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
  
        DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
  
 -      spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 +      mutex_lock(&dev_priv->backlight_lock);
  
        WARN_ON(panel->backlight.max == 0);
  
        if (panel->backlight.device)
                panel->backlight.device->props.power = FB_BLANK_UNBLANK;
  
 -      spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 +      mutex_unlock(&dev_priv->backlight_lock);
  }
  
  #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
@@@ -1262,6 -1267,7 +1262,6 @@@ int intel_panel_setup_backlight(struct 
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_panel *panel = &intel_connector->panel;
 -      unsigned long flags;
        int ret;
  
        if (!dev_priv->vbt.backlight.present) {
        }
  
        /* set level and max in panel struct */
 -      spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 +      mutex_lock(&dev_priv->backlight_lock);
        ret = dev_priv->display.setup_backlight(intel_connector);
 -      spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 +      mutex_unlock(&dev_priv->backlight_lock);
  
        if (ret) {
                DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
@@@ -1311,7 -1317,7 +1311,7 @@@ void intel_panel_init_backlight_funcs(s
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
-       if (IS_BROADWELL(dev)) {
+       if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
                dev_priv->display.setup_backlight = bdw_setup_backlight;
                dev_priv->display.enable_backlight = bdw_enable_backlight;
                dev_priv->display.disable_backlight = pch_disable_backlight;
index 011892d5356e60abe020ae64c7546ad87f77e02e,81eb8bca5628f71e3b6e425afeb69a17a46eca74..043c5a8eae20f9c0513bd2aa1b045d29c0abd9f8
   * i915.i915_enable_fbc parameter
   */
  
+ static void gen9_init_clock_gating(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       /*
+        * WaDisableSDEUnitClockGating:skl
+        * This seems to be a pre-production w/a.
+        */
+       I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+                  GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+       /*
+        * WaDisableDgMirrorFixInHalfSliceChicken5:skl
+        * This is a pre-production w/a.
+        */
+       I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
+                  I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
+                  ~GEN9_DG_MIRROR_FIX_ENABLE);
+       /* Wa4x4STCOptimizationDisable:skl */
+       I915_WRITE(CACHE_MODE_1,
+                  _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+ }
  static void i8xx_disable_fbc(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 fbc_ctl;
  
 +      dev_priv->fbc.enabled = false;
 +
        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
@@@ -101,8 -123,6 +125,8 @@@ static void i8xx_enable_fbc(struct drm_
        int i;
        u32 fbc_ctl;
  
 +      dev_priv->fbc.enabled = true;
 +
        cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];
@@@ -157,8 -177,6 +181,8 @@@ static void g4x_enable_fbc(struct drm_c
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
  
 +      dev_priv->fbc.enabled = true;
 +
        dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
@@@ -179,8 -197,6 +203,8 @@@ static void g4x_disable_fbc(struct drm_
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;
  
 +      dev_priv->fbc.enabled = false;
 +
        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
@@@ -232,8 -248,6 +256,8 @@@ static void ironlake_enable_fbc(struct 
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
  
 +      dev_priv->fbc.enabled = true;
 +
        dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dev_priv->fbc.threshold++;
@@@ -274,8 -288,6 +298,8 @@@ static void ironlake_disable_fbc(struc
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;
  
 +      dev_priv->fbc.enabled = false;
 +
        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
@@@ -302,8 -314,6 +326,8 @@@ static void gen7_enable_fbc(struct drm_
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
  
 +      dev_priv->fbc.enabled = true;
 +
        dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dev_priv->fbc.threshold++;
@@@ -353,19 -363,19 +377,19 @@@ bool intel_fbc_enabled(struct drm_devic
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
 -      if (!dev_priv->display.fbc_enabled)
 -              return false;
 -
 -      return dev_priv->display.fbc_enabled(dev);
 +      return dev_priv->fbc.enabled;
  }
  
 -void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
 +void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        if (!IS_GEN8(dev))
                return;
  
 +      if (!intel_fbc_enabled(dev))
 +              return;
 +
        I915_WRITE(MSG_FBC_REND_STATE, value);
  }
  
@@@ -1084,17 -1094,6 +1108,17 @@@ static unsigned long intel_calculate_wm
                wm_size = wm->max_wm;
        if (wm_size <= 0)
                wm_size = wm->default_wm;
 +
 +      /*
 +       * Bspec seems to indicate that the value shouldn't be lower than
 +       * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
 +       * Lets go for 8 which is the burst size since certain platforms
 +       * already use a hardcoded 8 (which is what the spec says should be
 +       * done).
 +       */
 +      if (wm_size <= 8)
 +              wm_size = 8;
 +
        return wm_size;
  }
  
@@@ -6298,7 -6297,7 +6322,7 @@@ static void hsw_power_well_post_enable(
        outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
  
-       if (IS_BROADWELL(dev))
+       if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
                gen8_irq_power_well_post_enable(dev_priv);
  }
  
@@@ -7363,43 -7362,33 +7387,43 @@@ void intel_fini_runtime_pm(struct drm_i
        pm_runtime_disable(device);
  }
  
 +static void intel_init_fbc(struct drm_i915_private *dev_priv)
 +{
 +      if (!HAS_FBC(dev_priv)) {
 +              dev_priv->fbc.enabled = false;
 +              return;
 +      }
 +
 +      if (INTEL_INFO(dev_priv)->gen >= 7) {
 +              dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
 +              dev_priv->display.enable_fbc = gen7_enable_fbc;
 +              dev_priv->display.disable_fbc = ironlake_disable_fbc;
 +      } else if (INTEL_INFO(dev_priv)->gen >= 5) {
 +              dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
 +              dev_priv->display.enable_fbc = ironlake_enable_fbc;
 +              dev_priv->display.disable_fbc = ironlake_disable_fbc;
 +      } else if (IS_GM45(dev_priv)) {
 +              dev_priv->display.fbc_enabled = g4x_fbc_enabled;
 +              dev_priv->display.enable_fbc = g4x_enable_fbc;
 +              dev_priv->display.disable_fbc = g4x_disable_fbc;
 +      } else {
 +              dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
 +              dev_priv->display.enable_fbc = i8xx_enable_fbc;
 +              dev_priv->display.disable_fbc = i8xx_disable_fbc;
 +
 +              /* This value was pulled out of someone's hat */
 +              I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
 +      }
 +
 +      dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
 +}
 +
  /* Set up chip specific power management-related functions */
  void intel_init_pm(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
 -      if (HAS_FBC(dev)) {
 -              if (INTEL_INFO(dev)->gen >= 7) {
 -                      dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
 -                      dev_priv->display.enable_fbc = gen7_enable_fbc;
 -                      dev_priv->display.disable_fbc = ironlake_disable_fbc;
 -              } else if (INTEL_INFO(dev)->gen >= 5) {
 -                      dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
 -                      dev_priv->display.enable_fbc = ironlake_enable_fbc;
 -                      dev_priv->display.disable_fbc = ironlake_disable_fbc;
 -              } else if (IS_GM45(dev)) {
 -                      dev_priv->display.fbc_enabled = g4x_fbc_enabled;
 -                      dev_priv->display.enable_fbc = g4x_enable_fbc;
 -                      dev_priv->display.disable_fbc = g4x_disable_fbc;
 -              } else {
 -                      dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
 -                      dev_priv->display.enable_fbc = i8xx_enable_fbc;
 -                      dev_priv->display.disable_fbc = i8xx_disable_fbc;
 -
 -                      /* This value was pulled out of someone's hat */
 -                      I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
 -              }
 -      }
 +      intel_init_fbc(dev_priv);
  
        /* For cxsr */
        if (IS_PINEVIEW(dev))
                i915_ironlake_get_mem_freq(dev);
  
        /* For FIFO watermark updates */
-       if (HAS_PCH_SPLIT(dev)) {
+       if (IS_GEN9(dev)) {
+               dev_priv->display.init_clock_gating = gen9_init_clock_gating;
+       } else if (HAS_PCH_SPLIT(dev)) {
                ilk_setup_wm_latency(dev);
  
                if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
index c21aaad55982a2153fe3bf71293969f56f95a29c,a6a64aee8f740005d6cf31d0f3e14095bef422fc..cc50bf65d35a2a727f3fa2242e8f695189bb008f
@@@ -740,12 -740,8 +740,12 @@@ static int bdw_init_workarounds(struct 
         * workaround for for a possible hang in the unlikely event a TLB
         * invalidation occurs during a PSD flush.
         */
 +      /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
        intel_ring_emit_wa(ring, HDC_CHICKEN0,
 -                         _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
 +                         _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT |
 +                                            (IS_BDW_GT3(dev) ?
 +                                             HDC_FENCE_DEST_SLM_DISABLE : 0)
 +                                 ));
  
        /* Wa4x4STCOptimizationDisable:bdw */
        intel_ring_emit_wa(ring, CACHE_MODE_1,
@@@ -827,7 -823,7 +827,7 @@@ static int init_render_ring(struct inte
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
         */
-       if (INTEL_INFO(dev)->gen >= 6)
+       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
  
        /* Required for the hardware to program scanline values for waiting */
@@@ -1201,7 -1197,7 +1201,7 @@@ gen5_ring_get_irq(struct intel_engine_c
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
  
 -      if (!dev->irq_enabled)
 +      if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return false;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@@ -1232,7 -1228,7 +1232,7 @@@ i9xx_ring_get_irq(struct intel_engine_c
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
  
 -      if (!dev->irq_enabled)
 +      if (!intel_irqs_enabled(dev_priv))
                return false;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@@ -1269,7 -1265,7 +1269,7 @@@ i8xx_ring_get_irq(struct intel_engine_c
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
  
 -      if (!dev->irq_enabled)
 +      if (!intel_irqs_enabled(dev_priv))
                return false;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@@ -1403,8 -1399,8 +1403,8 @@@ gen6_ring_get_irq(struct intel_engine_c
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
  
 -      if (!dev->irq_enabled)
 -             return false;
 +      if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 +              return false;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
@@@ -1446,7 -1442,7 +1446,7 @@@ hsw_vebox_get_irq(struct intel_engine_c
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
  
 -      if (!dev->irq_enabled)
 +      if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return false;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@@ -1466,6 -1462,9 +1466,6 @@@ hsw_vebox_put_irq(struct intel_engine_c
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
  
 -      if (!dev->irq_enabled)
 -              return;
 -
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                I915_WRITE_IMR(ring, ~0);
@@@ -1481,7 -1480,7 +1481,7 @@@ gen8_ring_get_irq(struct intel_engine_c
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
  
 -      if (!dev->irq_enabled)
 +      if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return false;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@@ -2204,9 -2203,8 +2204,9 @@@ hsw_ring_dispatch_execbuffer(struct int
                return ret;
  
        intel_ring_emit(ring,
 -                      MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
 -                      (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
 +                      MI_BATCH_BUFFER_START |
 +                      (flags & I915_DISPATCH_SECURE ?
 +                       0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);
@@@ -2241,7 -2239,6 +2241,7 @@@ static int gen6_ring_flush(struct intel
                           u32 invalidate, u32 flush)
  {
        struct drm_device *dev = ring->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t cmd;
        int ret;
  
        }
        intel_ring_advance(ring);
  
 -      if (IS_GEN7(dev) && !invalidate && flush)
 -              return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
 +      if (!invalidate && flush) {
 +              if (IS_GEN7(dev))
 +                      return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
 +              else if (IS_BROADWELL(dev))
 +                      dev_priv->fbc.need_sw_cache_clean = true;
 +      }
  
        return 0;
  }
index 78044bbed8c9fc02357b41e4737de1b61f83af06,57e7190c4c87b9906599b2406c53f08635776b1a..750b634d45eced1387e7a9a8b950bb7f5fe881ed
@@@ -138,6 -138,184 +138,184 @@@ static void intel_update_primary_plane(
                I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
  }
  
+ static void
+ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
+                struct drm_framebuffer *fb,
+                struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+                unsigned int crtc_w, unsigned int crtc_h,
+                uint32_t x, uint32_t y,
+                uint32_t src_w, uint32_t src_h)
+ {
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane + 1;
+       u32 plane_ctl, stride;
+       int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+       plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+       /* Mask out pixel format bits in case we change it */
+       plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
+       plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
+       plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
+       plane_ctl &= ~PLANE_CTL_TILED_MASK;
+       plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
+       /* Trickle feed has to be enabled */
+       plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_RGB565:
+               plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+               break;
+       case DRM_FORMAT_XBGR8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               break;
+       /*
+        * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
+        * to be already pre-multiplied. We need to add a knob (or a different
+        * DRM_FORMAT) for user-space to configure that.
+        */
+       case DRM_FORMAT_ABGR8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+                            PLANE_CTL_ORDER_RGBX |
+                            PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+               break;
+       case DRM_FORMAT_ARGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+                            PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+               break;
+       case DRM_FORMAT_YUYV:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+               break;
+       case DRM_FORMAT_YVYU:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+               break;
+       case DRM_FORMAT_UYVY:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+               break;
+       case DRM_FORMAT_VYUY:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+               break;
+       default:
+               BUG();
+       }
+       switch (obj->tiling_mode) {
+       case I915_TILING_NONE:
+               stride = fb->pitches[0] >> 6;
+               break;
+       case I915_TILING_X:
+               plane_ctl |= PLANE_CTL_TILED_X;
+               stride = fb->pitches[0] >> 9;
+               break;
+       default:
+               BUG();
+       }
+       plane_ctl |= PLANE_CTL_ENABLE;
+       plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+       intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
+                                      pixel_size, true,
+                                      src_w != crtc_w || src_h != crtc_h);
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       crtc_w--;
+       crtc_h--;
+       I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
+       I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+       I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
+       I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+       I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+       I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
+       POSTING_READ(PLANE_SURF(pipe, plane));
+ }
+ static void
+ skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
+ {
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane + 1;
+       I915_WRITE(PLANE_CTL(pipe, plane),
+                  I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
+       /* Activate double buffered register update */
+       I915_WRITE(PLANE_CTL(pipe, plane), 0);
+       POSTING_READ(PLANE_CTL(pipe, plane));
+       intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
+ }
+ static int
+ skl_update_colorkey(struct drm_plane *drm_plane,
+                   struct drm_intel_sprite_colorkey *key)
+ {
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane;
+       u32 plane_ctl;
+       I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
+       I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
+       I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+       plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+       plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
+       if (key->flags & I915_SET_COLORKEY_DESTINATION)
+               plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+       else if (key->flags & I915_SET_COLORKEY_SOURCE)
+               plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+       I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+       POSTING_READ(PLANE_CTL(pipe, plane));
+       return 0;
+ }
+ static void
+ skl_get_colorkey(struct drm_plane *drm_plane,
+                struct drm_intel_sprite_colorkey *key)
+ {
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane;
+       u32 plane_ctl;
+       key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
+       key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
+       key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
+       plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+       switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
+       case PLANE_CTL_KEY_ENABLE_DESTINATION:
+               key->flags = I915_SET_COLORKEY_DESTINATION;
+               break;
+       case PLANE_CTL_KEY_ENABLE_SOURCE:
+               key->flags = I915_SET_COLORKEY_SOURCE;
+               break;
+       default:
+               key->flags = I915_SET_COLORKEY_NONE;
+       }
+ }
  static void
  vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
                 struct drm_framebuffer *fb,
@@@ -845,24 -1023,57 +1023,24 @@@ static bool colorkey_enabled(struct int
  }
  
  static int
 -intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 -                 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
 -                 unsigned int crtc_w, unsigned int crtc_h,
 -                 uint32_t src_x, uint32_t src_y,
 -                 uint32_t src_w, uint32_t src_h)
 +intel_check_sprite_plane(struct drm_plane *plane,
 +                       struct intel_plane_state *state)
  {
 -      struct drm_device *dev = plane->dev;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
        struct intel_plane *intel_plane = to_intel_plane(plane);
 -      enum pipe pipe = intel_crtc->pipe;
 +      struct drm_framebuffer *fb = state->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
 -      struct drm_i915_gem_object *old_obj = intel_plane->obj;
 -      int ret;
 -      bool primary_enabled;
 -      bool visible;
 +      int crtc_x, crtc_y;
 +      unsigned int crtc_w, crtc_h;
 +      uint32_t src_x, src_y, src_w, src_h;
 +      struct drm_rect *src = &state->src;
 +      struct drm_rect *dst = &state->dst;
 +      struct drm_rect *orig_src = &state->orig_src;
 +      const struct drm_rect *clip = &state->clip;
        int hscale, vscale;
        int max_scale, min_scale;
        int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 -      struct drm_rect src = {
 -              /* sample coordinates in 16.16 fixed point */
 -              .x1 = src_x,
 -              .x2 = src_x + src_w,
 -              .y1 = src_y,
 -              .y2 = src_y + src_h,
 -      };
 -      struct drm_rect dst = {
 -              /* integer pixels */
 -              .x1 = crtc_x,
 -              .x2 = crtc_x + crtc_w,
 -              .y1 = crtc_y,
 -              .y2 = crtc_y + crtc_h,
 -      };
 -      const struct drm_rect clip = {
 -              .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
 -              .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
 -      };
 -      const struct {
 -              int crtc_x, crtc_y;
 -              unsigned int crtc_w, crtc_h;
 -              uint32_t src_x, src_y, src_w, src_h;
 -      } orig = {
 -              .crtc_x = crtc_x,
 -              .crtc_y = crtc_y,
 -              .crtc_w = crtc_w,
 -              .crtc_h = crtc_h,
 -              .src_x = src_x,
 -              .src_y = src_y,
 -              .src_w = src_w,
 -              .src_h = src_h,
 -      };
  
        /* Don't modify another pipe's plane */
        if (intel_plane->pipe != intel_crtc->pipe) {
        max_scale = intel_plane->max_downscale << 16;
        min_scale = intel_plane->can_scale ? 1 : (1 << 16);
  
 -      drm_rect_rotate(&src, fb->width << 16, fb->height << 16,
 +      drm_rect_rotate(src, fb->width << 16, fb->height << 16,
                        intel_plane->rotation);
  
 -      hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
 +      hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
        BUG_ON(hscale < 0);
  
 -      vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
 +      vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
        BUG_ON(vscale < 0);
  
 -      visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
 +      state->visible =  drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
  
 -      crtc_x = dst.x1;
 -      crtc_y = dst.y1;
 -      crtc_w = drm_rect_width(&dst);
 -      crtc_h = drm_rect_height(&dst);
 +      crtc_x = dst->x1;
 +      crtc_y = dst->y1;
 +      crtc_w = drm_rect_width(dst);
 +      crtc_h = drm_rect_height(dst);
  
 -      if (visible) {
 +      if (state->visible) {
                /* check again in case clipping clamped the results */
 -              hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
 +              hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
                if (hscale < 0) {
                        DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
 -                      drm_rect_debug_print(&src, true);
 -                      drm_rect_debug_print(&dst, false);
 +                      drm_rect_debug_print(src, true);
 +                      drm_rect_debug_print(dst, false);
  
                        return hscale;
                }
  
 -              vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
 +              vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
                if (vscale < 0) {
                        DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
 -                      drm_rect_debug_print(&src, true);
 -                      drm_rect_debug_print(&dst, false);
 +                      drm_rect_debug_print(src, true);
 +                      drm_rect_debug_print(dst, false);
  
                        return vscale;
                }
  
                /* Make the source viewport size an exact multiple of the scaling factors. */
 -              drm_rect_adjust_size(&src,
 -                                   drm_rect_width(&dst) * hscale - drm_rect_width(&src),
 -                                   drm_rect_height(&dst) * vscale - drm_rect_height(&src));
 +              drm_rect_adjust_size(src,
 +                                   drm_rect_width(dst) * hscale - drm_rect_width(src),
 +                                   drm_rect_height(dst) * vscale - drm_rect_height(src));
  
 -              drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16,
 +              drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
                                    intel_plane->rotation);
  
                /* sanity check to make sure the src viewport wasn't enlarged */
 -              WARN_ON(src.x1 < (int) src_x ||
 -                      src.y1 < (int) src_y ||
 -                      src.x2 > (int) (src_x + src_w) ||
 -                      src.y2 > (int) (src_y + src_h));
 +              WARN_ON(src->x1 < (int) orig_src->x1 ||
 +                      src->y1 < (int) orig_src->y1 ||
 +                      src->x2 > (int) orig_src->x2 ||
 +                      src->y2 > (int) orig_src->y2);
  
                /*
                 * Hardware doesn't handle subpixel coordinates.
                 * increase the source viewport size, because that could
                 * push the downscaling factor out of bounds.
                 */
 -              src_x = src.x1 >> 16;
 -              src_w = drm_rect_width(&src) >> 16;
 -              src_y = src.y1 >> 16;
 -              src_h = drm_rect_height(&src) >> 16;
 +              src_x = src->x1 >> 16;
 +              src_w = drm_rect_width(src) >> 16;
 +              src_y = src->y1 >> 16;
 +              src_h = drm_rect_height(src) >> 16;
  
                if (format_is_yuv(fb->pixel_format)) {
                        src_x &= ~1;
                                crtc_w &= ~1;
  
                        if (crtc_w == 0)
 -                              visible = false;
 +                              state->visible = false;
                }
        }
  
        /* Check size restrictions when scaling */
 -      if (visible && (src_w != crtc_w || src_h != crtc_h)) {
 +      if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
                unsigned int width_bytes;
  
                WARN_ON(!intel_plane->can_scale);
                /* FIXME interlacing min height is 6 */
  
                if (crtc_w < 3 || crtc_h < 3)
 -                      visible = false;
 +                      state->visible = false;
  
                if (src_w < 3 || src_h < 3)
 -                      visible = false;
 +                      state->visible = false;
  
 -              width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
 +              width_bytes = ((src_x * pixel_size) & 63) +
 +                                      src_w * pixel_size;
  
                if (src_w > 2048 || src_h > 2048 ||
                    width_bytes > 4096 || fb->pitches[0] > 4096) {
                }
        }
  
 -      dst.x1 = crtc_x;
 -      dst.x2 = crtc_x + crtc_w;
 -      dst.y1 = crtc_y;
 -      dst.y2 = crtc_y + crtc_h;
 +      if (state->visible) {
 +              src->x1 = src_x;
 +              src->x2 = src_x + src_w;
 +              src->y1 = src_y;
 +              src->y2 = src_y + src_h;
 +      }
 +
 +      dst->x1 = crtc_x;
 +      dst->x2 = crtc_x + crtc_w;
 +      dst->y1 = crtc_y;
 +      dst->y2 = crtc_y + crtc_h;
 +
 +      return 0;
 +}
 +
 +static int
 +intel_commit_sprite_plane(struct drm_plane *plane,
 +                        struct intel_plane_state *state)
 +{
 +      struct drm_device *dev = plane->dev;
 +      struct drm_crtc *crtc = state->crtc;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      struct intel_plane *intel_plane = to_intel_plane(plane);
 +      enum pipe pipe = intel_crtc->pipe;
 +      struct drm_framebuffer *fb = state->fb;
 +      struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 +      struct drm_i915_gem_object *obj = intel_fb->obj;
 +      struct drm_i915_gem_object *old_obj = intel_plane->obj;
 +      int crtc_x, crtc_y;
 +      unsigned int crtc_w, crtc_h;
 +      uint32_t src_x, src_y, src_w, src_h;
 +      struct drm_rect *dst = &state->dst;
 +      const struct drm_rect *clip = &state->clip;
 +      bool primary_enabled;
 +      int ret;
  
        /*
         * If the sprite is completely covering the primary plane,
         * we can disable the primary and save power.
         */
 -      primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
 -      WARN_ON(!primary_enabled && !visible && intel_crtc->active);
 +      primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
 +      WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
  
 -      mutex_lock(&dev->struct_mutex);
 -
 -      /* Note that this will apply the VT-d workaround for scanouts,
 -       * which is more restrictive than required for sprites. (The
 -       * primary plane requires 256KiB alignment with 64 PTE padding,
 -       * the sprite planes only require 128KiB alignment and 32 PTE padding.
 -       */
 -      ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
  
 -      i915_gem_track_fb(old_obj, obj,
 -                        INTEL_FRONTBUFFER_SPRITE(pipe));
 -      mutex_unlock(&dev->struct_mutex);
 +      if (old_obj != obj) {
 +              mutex_lock(&dev->struct_mutex);
  
 -      if (ret)
 -              return ret;
 +              /* Note that this will apply the VT-d workaround for scanouts,
 +               * which is more restrictive than required for sprites. (The
 +               * primary plane requires 256KiB alignment with 64 PTE padding,
 +               * the sprite planes only require 128KiB alignment and 32 PTE
 +               * padding.
 +               */
 +              ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 +              if (ret == 0)
 +                      i915_gem_track_fb(old_obj, obj,
 +                                        INTEL_FRONTBUFFER_SPRITE(pipe));
 +              mutex_unlock(&dev->struct_mutex);
 +              if (ret)
 +                      return ret;
 +      }
  
 -      intel_plane->crtc_x = orig.crtc_x;
 -      intel_plane->crtc_y = orig.crtc_y;
 -      intel_plane->crtc_w = orig.crtc_w;
 -      intel_plane->crtc_h = orig.crtc_h;
 -      intel_plane->src_x = orig.src_x;
 -      intel_plane->src_y = orig.src_y;
 -      intel_plane->src_w = orig.src_w;
 -      intel_plane->src_h = orig.src_h;
 +      intel_plane->crtc_x = state->orig_dst.x1;
 +      intel_plane->crtc_y = state->orig_dst.y1;
 +      intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
 +      intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
 +      intel_plane->src_x = state->orig_src.x1;
 +      intel_plane->src_y = state->orig_src.y1;
 +      intel_plane->src_w = drm_rect_width(&state->orig_src);
 +      intel_plane->src_h = drm_rect_height(&state->orig_src);
        intel_plane->obj = obj;
  
        if (intel_crtc->active) {
                if (primary_was_enabled && !primary_enabled)
                        intel_pre_disable_primary(crtc);
  
 -              if (visible)
 +              if (state->visible) {
 +                      crtc_x = state->dst.x1;
 +                      crtc_y = state->dst.y1;
 +                      crtc_w = drm_rect_width(&state->dst);
 +                      crtc_h = drm_rect_height(&state->dst);
 +                      src_x = state->src.x1;
 +                      src_y = state->src.y1;
 +                      src_w = drm_rect_width(&state->src);
 +                      src_h = drm_rect_height(&state->src);
                        intel_plane->update_plane(plane, crtc, fb, obj,
                                                  crtc_x, crtc_y, crtc_w, crtc_h,
                                                  src_x, src_y, src_w, src_h);
 -              else
 +              } else {
                        intel_plane->disable_plane(plane, crtc);
 +              }
 +
  
                intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
  
        }
  
        /* Unpin old obj after new one is active to avoid ugliness */
 -      if (old_obj) {
 +      if (old_obj && old_obj != obj) {
 +
                /*
                 * It's fairly common to simply update the position of
                 * an existing object.  In that case, we don't need to
                 * wait for vblank to avoid ugliness, we only need to
                 * do the pin & ref bookkeeping.
                 */
 -              if (old_obj != obj && intel_crtc->active)
 +              if (intel_crtc->active)
                        intel_wait_for_vblank(dev, intel_crtc->pipe);
  
                mutex_lock(&dev->struct_mutex);
        return 0;
  }
  
 +static int
 +intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 +                 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
 +                 unsigned int crtc_w, unsigned int crtc_h,
 +                 uint32_t src_x, uint32_t src_y,
 +                 uint32_t src_w, uint32_t src_h)
 +{
 +      struct intel_plane_state state;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int ret;
 +
 +      state.crtc = crtc;
 +      state.fb = fb;
 +
 +      /* sample coordinates in 16.16 fixed point */
 +      state.src.x1 = src_x;
 +      state.src.x2 = src_x + src_w;
 +      state.src.y1 = src_y;
 +      state.src.y2 = src_y + src_h;
 +
 +      /* integer pixels */
 +      state.dst.x1 = crtc_x;
 +      state.dst.x2 = crtc_x + crtc_w;
 +      state.dst.y1 = crtc_y;
 +      state.dst.y2 = crtc_y + crtc_h;
 +
 +      state.clip.x1 = 0;
 +      state.clip.y1 = 0;
 +      state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
 +      state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
 +      state.orig_src = state.src;
 +      state.orig_dst = state.dst;
 +
 +      ret = intel_check_sprite_plane(plane, &state);
 +      if (ret)
 +              return ret;
 +
 +      return intel_commit_sprite_plane(plane, &state);
 +}
 +
  static int
  intel_disable_plane(struct drm_plane *plane)
  {
@@@ -1358,6 -1483,18 +1536,18 @@@ static uint32_t vlv_plane_formats[] = 
        DRM_FORMAT_VYUY,
  };
  
+ static uint32_t skl_plane_formats[] = {
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+ };
  int
  intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
  {
                        num_plane_formats = ARRAY_SIZE(snb_plane_formats);
                }
                break;
+       case 9:
+               /*
+                * FIXME: Skylake planes can be scaled (with some restrictions),
+                * but this is for another time.
+                */
+               intel_plane->can_scale = false;
+               intel_plane->max_downscale = 1;
+               intel_plane->update_plane = skl_update_plane;
+               intel_plane->disable_plane = skl_disable_plane;
+               intel_plane->update_colorkey = skl_update_colorkey;
+               intel_plane->get_colorkey = skl_get_colorkey;
+               plane_formats = skl_plane_formats;
+               num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+               break;
        default:
                kfree(intel_plane);
                return -ENODEV;
index 0e99852222e14236cb5c758f82fa7bcec0f0b4d0,3b27fb028762451cab08b604f6f93344da49db69..0b0f4f85c4f2942b818481e0db156ddb7bb15e06
@@@ -194,15 -194,13 +194,15 @@@ static void vlv_force_wake_reset(struc
  static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
                                                int fw_engine)
  {
 +      /*
 +       * WaRsDontPollForAckOnClearingFWBits:vlv
 +       * Hardware clears ack bits lazily (only when all ack
 +       * bits become 0) so don't poll for individiual ack
 +       * bits to be clear here like on other platforms.
 +       */
 +
        /* Check for Render Engine */
        if (FORCEWAKE_RENDER & fw_engine) {
 -              if (wait_for_atomic((__raw_i915_read32(dev_priv,
 -                                              FORCEWAKE_ACK_VLV) &
 -                                              FORCEWAKE_KERNEL) == 0,
 -                                      FORCEWAKE_ACK_TIMEOUT_MS))
 -                      DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
  
                __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                                   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
  
        /* Check for Media Engine */
        if (FORCEWAKE_MEDIA & fw_engine) {
 -              if (wait_for_atomic((__raw_i915_read32(dev_priv,
 -                                              FORCEWAKE_ACK_MEDIA_VLV) &
 -                                              FORCEWAKE_KERNEL) == 0,
 -                                      FORCEWAKE_ACK_TIMEOUT_MS))
 -                      DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
  
                __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                                   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
@@@ -965,7 -968,7 +965,7 @@@ static const struct register_whitelist 
        /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
        uint32_t gen_bitmask;
  } whitelist[] = {
-       { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
+       { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
  };
  
  int i915_reg_read_ioctl(struct drm_device *dev,
This page took 0.136867 seconds and 5 git commands to generate.