Merge tag 'v3.4-rc6' into drm-intel-next
authorDaniel Vetter <daniel.vetter@ffwll.ch>
Mon, 7 May 2012 09:30:46 +0000 (11:30 +0200)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Mon, 7 May 2012 12:02:14 +0000 (14:02 +0200)
Conflicts:
drivers/gpu/drm/i915/intel_display.c

Ok, this is a fun story of git totally messing things up. There
/shouldn't/ be any conflict in here, because the fixes in -rc6 do only
touch functions that have not been changed in -next.

The offending commits in drm-next are 14415745b2..1fa611065 which
simply move a few functions from intel_display.c to intel_pm.c. The
problem seems to be that git diff gets completely confused:

$ git diff 14415745b2..1fa611065

is a nice mess in intel_display.c, and the diff leaks into totally
unrelated functions, whereas

$ git diff --minimal 14415745b2..1fa611065

is exactly what we want.

Unfortunately there seems to be no way to teach similar smarts to the
merge diff and conflict generation code, because with the minimal diff
there really shouldn't be any conflicts. For added hilarity, every
time something in that area changes the + and - lines in the diff move
around like crazy, again resulting in new conflicts. So I fear this
mess will stay with us for a little longer (and might result in
another backmerge down the road).

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
20 files changed:
1  2 
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/si.c

index a9ca1b80fc28aa9e330f407accc750455906b180,c79870a75c2ffa426125d17bba4fc736ec3233e9..e43eb1a9d8ccc3a5d80bacbfd3726adefa0ba5c7
@@@ -227,7 -227,7 +227,7 @@@ static int drm_mode_object_get(struct d
  again:
        if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
                DRM_ERROR("Ran out memory getting a mode number\n");
 -              return -EINVAL;
 +              return -ENOMEM;
        }
  
        mutex_lock(&dev->mode_config.idr_mutex);
        mutex_unlock(&dev->mode_config.idr_mutex);
        if (ret == -EAGAIN)
                goto again;
 +      else if (ret)
 +              return ret;
  
        obj->id = new_id;
        obj->type = obj_type;
@@@ -2187,47 -2185,6 +2187,47 @@@ static int format_check(struct drm_mode
        }
  }
  
 +static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
 +{
 +      int ret, hsub, vsub, num_planes, i;
 +
 +      ret = format_check(r);
 +      if (ret) {
 +              DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
 +              return ret;
 +      }
 +
 +      hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
 +      vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
 +      num_planes = drm_format_num_planes(r->pixel_format);
 +
 +      if (r->width == 0 || r->width % hsub) {
 +              DRM_ERROR("bad framebuffer width %u\n", r->height);
 +              return -EINVAL;
 +      }
 +
 +      if (r->height == 0 || r->height % vsub) {
 +              DRM_ERROR("bad framebuffer height %u\n", r->height);
 +              return -EINVAL;
 +      }
 +
 +      for (i = 0; i < num_planes; i++) {
 +              unsigned int width = r->width / (i != 0 ? hsub : 1);
 +
 +              if (!r->handles[i]) {
 +                      DRM_ERROR("no buffer object handle for plane %d\n", i);
 +                      return -EINVAL;
 +              }
 +
 +              if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
 +                      DRM_ERROR("bad pitch %u for plane %d\n", r->pitches[i], i);
 +                      return -EINVAL;
 +              }
 +      }
 +
 +      return 0;
 +}
 +
  /**
   * drm_mode_addfb2 - add an FB to the graphics configuration
   * @inode: inode from the ioctl
@@@ -2267,9 -2224,11 +2267,9 @@@ int drm_mode_addfb2(struct drm_device *
                return -EINVAL;
        }
  
 -      ret = format_check(r);
 -      if (ret) {
 -              DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
 +      ret = framebuffer_check(r);
 +      if (ret)
                return ret;
 -      }
  
        mutex_lock(&dev->mode_config.mutex);
  
@@@ -3376,10 -3335,12 +3376,12 @@@ int drm_mode_page_flip_ioctl(struct drm
  
        ret = crtc->funcs->page_flip(crtc, fb, e);
        if (ret) {
-               spin_lock_irqsave(&dev->event_lock, flags);
-               file_priv->event_space += sizeof e->event;
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-               kfree(e);
+               if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+                       spin_lock_irqsave(&dev->event_lock, flags);
+                       file_priv->event_space += sizeof e->event;
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       kfree(e);
+               }
        }
  
  out:
@@@ -3507,140 -3468,3 +3509,140 @@@ void drm_fb_get_bpp_depth(uint32_t form
        }
  }
  EXPORT_SYMBOL(drm_fb_get_bpp_depth);
 +
 +/**
 + * drm_format_num_planes - get the number of planes for format
 + * @format: pixel format (DRM_FORMAT_*)
 + *
 + * RETURNS:
 + * The number of planes used by the specified pixel format.
 + */
 +int drm_format_num_planes(uint32_t format)
 +{
 +      switch (format) {
 +      case DRM_FORMAT_YUV410:
 +      case DRM_FORMAT_YVU410:
 +      case DRM_FORMAT_YUV411:
 +      case DRM_FORMAT_YVU411:
 +      case DRM_FORMAT_YUV420:
 +      case DRM_FORMAT_YVU420:
 +      case DRM_FORMAT_YUV422:
 +      case DRM_FORMAT_YVU422:
 +      case DRM_FORMAT_YUV444:
 +      case DRM_FORMAT_YVU444:
 +              return 3;
 +      case DRM_FORMAT_NV12:
 +      case DRM_FORMAT_NV21:
 +      case DRM_FORMAT_NV16:
 +      case DRM_FORMAT_NV61:
 +              return 2;
 +      default:
 +              return 1;
 +      }
 +}
 +EXPORT_SYMBOL(drm_format_num_planes);
 +
 +/**
 + * drm_format_plane_cpp - determine the bytes per pixel value
 + * @format: pixel format (DRM_FORMAT_*)
 + * @plane: plane index
 + *
 + * RETURNS:
 + * The bytes per pixel value for the specified plane.
 + */
 +int drm_format_plane_cpp(uint32_t format, int plane)
 +{
 +      unsigned int depth;
 +      int bpp;
 +
 +      if (plane >= drm_format_num_planes(format))
 +              return 0;
 +
 +      switch (format) {
 +      case DRM_FORMAT_YUYV:
 +      case DRM_FORMAT_YVYU:
 +      case DRM_FORMAT_UYVY:
 +      case DRM_FORMAT_VYUY:
 +              return 2;
 +      case DRM_FORMAT_NV12:
 +      case DRM_FORMAT_NV21:
 +      case DRM_FORMAT_NV16:
 +      case DRM_FORMAT_NV61:
 +              return plane ? 2 : 1;
 +      case DRM_FORMAT_YUV410:
 +      case DRM_FORMAT_YVU410:
 +      case DRM_FORMAT_YUV411:
 +      case DRM_FORMAT_YVU411:
 +      case DRM_FORMAT_YUV420:
 +      case DRM_FORMAT_YVU420:
 +      case DRM_FORMAT_YUV422:
 +      case DRM_FORMAT_YVU422:
 +      case DRM_FORMAT_YUV444:
 +      case DRM_FORMAT_YVU444:
 +              return 1;
 +      default:
 +              drm_fb_get_bpp_depth(format, &depth, &bpp);
 +              return bpp >> 3;
 +      }
 +}
 +EXPORT_SYMBOL(drm_format_plane_cpp);
 +
 +/**
 + * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
 + * @format: pixel format (DRM_FORMAT_*)
 + *
 + * RETURNS:
 + * The horizontal chroma subsampling factor for the
 + * specified pixel format.
 + */
 +int drm_format_horz_chroma_subsampling(uint32_t format)
 +{
 +      switch (format) {
 +      case DRM_FORMAT_YUV411:
 +      case DRM_FORMAT_YVU411:
 +      case DRM_FORMAT_YUV410:
 +      case DRM_FORMAT_YVU410:
 +              return 4;
 +      case DRM_FORMAT_YUYV:
 +      case DRM_FORMAT_YVYU:
 +      case DRM_FORMAT_UYVY:
 +      case DRM_FORMAT_VYUY:
 +      case DRM_FORMAT_NV12:
 +      case DRM_FORMAT_NV21:
 +      case DRM_FORMAT_NV16:
 +      case DRM_FORMAT_NV61:
 +      case DRM_FORMAT_YUV422:
 +      case DRM_FORMAT_YVU422:
 +      case DRM_FORMAT_YUV420:
 +      case DRM_FORMAT_YVU420:
 +              return 2;
 +      default:
 +              return 1;
 +      }
 +}
 +EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
 +
 +/**
 + * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
 + * @format: pixel format (DRM_FORMAT_*)
 + *
 + * RETURNS:
 + * The vertical chroma subsampling factor for the
 + * specified pixel format.
 + */
 +int drm_format_vert_chroma_subsampling(uint32_t format)
 +{
 +      switch (format) {
 +      case DRM_FORMAT_YUV410:
 +      case DRM_FORMAT_YVU410:
 +              return 4;
 +      case DRM_FORMAT_YUV420:
 +      case DRM_FORMAT_YVU420:
 +      case DRM_FORMAT_NV12:
 +      case DRM_FORMAT_NV21:
 +              return 2;
 +      default:
 +              return 1;
 +      }
 +}
 +EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
index 35462df7cefde182cb7fb6736bd097a5544db3f3,e6162a1681f0931911bcf7a412c174666b23d50a..a8db38617f4a072e9ba3879112aa24833d54ad19
@@@ -468,45 -468,7 +468,45 @@@ static int i915_interrupt_info(struct s
        if (ret)
                return ret;
  
 -      if (!HAS_PCH_SPLIT(dev)) {
 +      if (IS_VALLEYVIEW(dev)) {
 +              seq_printf(m, "Display IER:\t%08x\n",
 +                         I915_READ(VLV_IER));
 +              seq_printf(m, "Display IIR:\t%08x\n",
 +                         I915_READ(VLV_IIR));
 +              seq_printf(m, "Display IIR_RW:\t%08x\n",
 +                         I915_READ(VLV_IIR_RW));
 +              seq_printf(m, "Display IMR:\t%08x\n",
 +                         I915_READ(VLV_IMR));
 +              for_each_pipe(pipe)
 +                      seq_printf(m, "Pipe %c stat:\t%08x\n",
 +                                 pipe_name(pipe),
 +                                 I915_READ(PIPESTAT(pipe)));
 +
 +              seq_printf(m, "Master IER:\t%08x\n",
 +                         I915_READ(VLV_MASTER_IER));
 +
 +              seq_printf(m, "Render IER:\t%08x\n",
 +                         I915_READ(GTIER));
 +              seq_printf(m, "Render IIR:\t%08x\n",
 +                         I915_READ(GTIIR));
 +              seq_printf(m, "Render IMR:\t%08x\n",
 +                         I915_READ(GTIMR));
 +
 +              seq_printf(m, "PM IER:\t\t%08x\n",
 +                         I915_READ(GEN6_PMIER));
 +              seq_printf(m, "PM IIR:\t\t%08x\n",
 +                         I915_READ(GEN6_PMIIR));
 +              seq_printf(m, "PM IMR:\t\t%08x\n",
 +                         I915_READ(GEN6_PMIMR));
 +
 +              seq_printf(m, "Port hotplug:\t%08x\n",
 +                         I915_READ(PORT_HOTPLUG_EN));
 +              seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 +                         I915_READ(VLV_DPFLIPSTAT));
 +              seq_printf(m, "DPINVGTT:\t%08x\n",
 +                         I915_READ(DPINVGTT));
 +
 +      } else if (!HAS_PCH_SPLIT(dev)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
@@@ -742,7 -704,6 +742,7 @@@ static void i915_ring_error_state(struc
                                  struct drm_i915_error_state *error,
                                  unsigned ring)
  {
 +      BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
        seq_printf(m, "%s command stream:\n", ring_str(ring));
        seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
        seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
        if (INTEL_INFO(dev)->gen >= 4)
                seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
        seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
 +      seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
        if (INTEL_INFO(dev)->gen >= 6) {
 -              seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
                seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
                seq_printf(m, "  SYNC_0: 0x%08x\n",
                           error->semaphore_mboxes[ring][0]);
@@@ -1171,17 -1132,6 +1171,17 @@@ static int gen6_drpc_info(struct seq_fi
  
        seq_printf(m, "Core Power Down: %s\n",
                   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
 +
 +      /* Not exactly sure what this is */
 +      seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
 +                 I915_READ(GEN6_GT_GFX_RC6_LOCKED));
 +      seq_printf(m, "RC6 residency since boot: %u\n",
 +                 I915_READ(GEN6_GT_GFX_RC6));
 +      seq_printf(m, "RC6+ residency since boot: %u\n",
 +                 I915_READ(GEN6_GT_GFX_RC6p));
 +      seq_printf(m, "RC6++ residency since boot: %u\n",
 +                 I915_READ(GEN6_GT_GFX_RC6pp));
 +
        return 0;
  }
  
@@@ -1274,6 -1224,9 +1274,9 @@@ static int i915_emon_status(struct seq_
        unsigned long temp, chipset, gfx;
        int ret;
  
+       if (!IS_GEN5(dev))
+               return -ENODEV;
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
@@@ -1552,53 -1505,6 +1555,53 @@@ static int i915_ppgtt_info(struct seq_f
        return 0;
  }
  
 +static int i915_dpio_info(struct seq_file *m, void *data)
 +{
 +      struct drm_info_node *node = (struct drm_info_node *) m->private;
 +      struct drm_device *dev = node->minor->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      int ret;
 +
 +
 +      if (!IS_VALLEYVIEW(dev)) {
 +              seq_printf(m, "unsupported\n");
 +              return 0;
 +      }
 +
 +      ret = mutex_lock_interruptible(&dev->mode_config.mutex);
 +      if (ret)
 +              return ret;
 +
 +      seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
 +
 +      seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
 +                 intel_dpio_read(dev_priv, _DPIO_DIV_A));
 +      seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
 +                 intel_dpio_read(dev_priv, _DPIO_DIV_B));
 +
 +      seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
 +                 intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
 +      seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
 +                 intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
 +
 +      seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
 +                 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
 +      seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
 +                 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
 +
 +      seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
 +                 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
 +      seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
 +                 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
 +
 +      seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
 +                 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
 +
 +      mutex_unlock(&dev->mode_config.mutex);
 +
 +      return 0;
 +}
 +
  static ssize_t
  i915_wedged_read(struct file *filp,
                 char __user *ubuf,
@@@ -1832,7 -1738,7 +1835,7 @@@ static int i915_forcewake_open(struct i
        return 0;
  }
  
 -int i915_forcewake_release(struct inode *inode, struct file *file)
 +static int i915_forcewake_release(struct inode *inode, struct file *file)
  {
        struct drm_device *dev = inode->i_private;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -1933,7 -1839,6 +1936,7 @@@ static struct drm_info_list i915_debugf
        {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
        {"i915_swizzle_info", i915_swizzle_info, 0},
        {"i915_ppgtt_info", i915_ppgtt_info, 0},
 +      {"i915_dpio", i915_dpio_info, 0},
  };
  #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
  
index a813f652fa1f3a39e54a7426fb6a1f57772d9586,ba60f3c8f911c187dc636e809af16bd64f51cc8d..068958cdd555a5331af1b295b86ad48c98739553
@@@ -26,8 -26,6 +26,8 @@@
   *
   */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include "drmP.h"
  #include "drm.h"
  #include "drm_crtc_helper.h"
@@@ -45,7 -43,6 +45,7 @@@
  #include <linux/slab.h>
  #include <linux/module.h>
  #include <acpi/video.h>
 +#include <asm/pat.h>
  
  static void i915_write_hws_pga(struct drm_device *dev)
  {
@@@ -790,9 -787,6 +790,9 @@@ static int i915_getparam(struct drm_dev
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
 +      case I915_PARAM_HAS_ALIASING_PPGTT:
 +              value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
 +              break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
@@@ -1164,14 -1158,14 +1164,14 @@@ static void i915_switcheroo_set_state(s
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
        if (state == VGA_SWITCHEROO_ON) {
 -              printk(KERN_INFO "i915: switched on\n");
 +              pr_info("switched on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
                i915_resume(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
 -              printk(KERN_ERR "i915: switched off\n");
 +              pr_err("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                i915_suspend(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
@@@ -1222,8 -1216,10 +1222,8 @@@ static int i915_load_gem_init(struct dr
                /* PPGTT pdes are stolen from global gtt ptes, so shrink the
                 * aperture accordingly when using aliasing ppgtt. */
                gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
 -              /* For paranoia keep the guard page in between. */
 -              gtt_size -= PAGE_SIZE;
  
 -              i915_gem_do_init(dev, 0, mappable_size, gtt_size);
 +              i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
  
                ret = i915_gem_init_aliasing_ppgtt(dev);
                if (ret) {
                 * should be enough to keep any prefetching inside of the
                 * aperture.
                 */
 -              i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
 +              i915_gem_init_global_gtt(dev, 0, mappable_size,
 +                                       gtt_size);
        }
  
        ret = i915_gem_init_hw(dev);
@@@ -1706,6 -1701,9 +1706,9 @@@ void i915_update_gfx_val(struct drm_i91
        unsigned long diffms;
        u32 count;
  
+       if (dev_priv->info->gen != 5)
+               return;
        getrawmonotonic(&now);
        diff1 = timespec_sub(now, dev_priv->last_time2);
  
@@@ -1936,29 -1934,6 +1939,29 @@@ ips_ping_for_i915_load(void
        }
  }
  
 +static void
 +i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
 +              unsigned long size)
 +{
 +      dev_priv->mm.gtt_mtrr = -1;
 +
 +#if defined(CONFIG_X86_PAT)
 +      if (cpu_has_pat)
 +              return;
 +#endif
 +
 +      /* Set up a WC MTRR for non-PAT systems.  This is more common than
 +       * one would think, because the kernel disables PAT on first
 +       * generation Core chips because WC PAT gets overridden by a UC
 +       * MTRR if present.  Even if a UC MTRR isn't present.
 +       */
 +      dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
 +      if (dev_priv->mm.gtt_mtrr < 0) {
 +              DRM_INFO("MTRR allocation failed.  Graphics "
 +                       "performance may suffer.\n");
 +      }
 +}
 +
  /**
   * i915_driver_load - setup chip and create an initial config
   * @dev: DRM device
  int i915_driver_load(struct drm_device *dev, unsigned long flags)
  {
        struct drm_i915_private *dev_priv;
 +      struct intel_device_info *info;
        int ret = 0, mmio_bar;
 -      uint32_t agp_size;
 +      uint32_t aperture_size;
 +
 +      info = (struct intel_device_info *) flags;
 +
 +      /* Refuse to load on gen6+ without kms enabled. */
 +      if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
 +              return -ENODEV;
 +
  
        /* i915 has 4 more counters */
        dev->counters += 4;
  
        dev->dev_private = (void *)dev_priv;
        dev_priv->dev = dev;
 -      dev_priv->info = (struct intel_device_info *) flags;
 +      dev_priv->info = info;
  
        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                goto out_rmmap;
        }
  
 -      agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 +      aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
  
        dev_priv->mm.gtt_mapping =
 -              io_mapping_create_wc(dev->agp->base, agp_size);
 +              io_mapping_create_wc(dev->agp->base, aperture_size);
        if (dev_priv->mm.gtt_mapping == NULL) {
                ret = -EIO;
                goto out_rmmap;
        }
  
 -      /* Set up a WC MTRR for non-PAT systems.  This is more common than
 -       * one would think, because the kernel disables PAT on first
 -       * generation Core chips because WC PAT gets overridden by a UC
 -       * MTRR if present.  Even if a UC MTRR isn't present.
 -       */
 -      dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
 -                                       agp_size,
 -                                       MTRR_TYPE_WRCOMB, 1);
 -      if (dev_priv->mm.gtt_mtrr < 0) {
 -              DRM_INFO("MTRR allocation failed.  Graphics "
 -                       "performance may suffer.\n");
 -      }
 +      i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size);
  
        /* The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
        spin_lock_init(&dev_priv->error_lock);
        spin_lock_init(&dev_priv->rps_lock);
  
 -      if (IS_IVYBRIDGE(dev))
 +      if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
                dev_priv->num_pipe = 3;
        else if (IS_MOBILE(dev) || !IS_GEN2(dev))
                dev_priv->num_pipe = 2;
                }
        }
  
 +      i915_setup_sysfs(dev);
 +
        /* Must be done after probing outputs */
        intel_opregion_init(dev);
        acpi_video_register();
        setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
                    (unsigned long) dev);
  
-       spin_lock(&mchdev_lock);
-       i915_mch_dev = dev_priv;
-       dev_priv->mchdev_lock = &mchdev_lock;
-       spin_unlock(&mchdev_lock);
+       if (IS_GEN5(dev)) {
+               spin_lock(&mchdev_lock);
+               i915_mch_dev = dev_priv;
+               dev_priv->mchdev_lock = &mchdev_lock;
+               spin_unlock(&mchdev_lock);
  
-       ips_ping_for_i915_load();
+               ips_ping_for_i915_load();
+       }
  
        return 0;
  
@@@ -2192,8 -2170,6 +2197,8 @@@ int i915_driver_unload(struct drm_devic
        i915_mch_dev = NULL;
        spin_unlock(&mchdev_lock);
  
 +      i915_teardown_sysfs(dev);
 +
        if (dev_priv->mm.inactive_shrinker.shrink)
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);
  
@@@ -2301,7 -2277,7 +2306,7 @@@ int i915_driver_open(struct drm_device 
   * mode setting case, we want to restore the kernel's initial mode (just
   * in case the last client left us in a bad state).
   *
 - * Additionally, in the non-mode setting case, we'll tear down the AGP
 + * Additionally, in the non-mode setting case, we'll tear down the GTT
   * and DMA structures, since the kernel won't be using them, and clea
   * up any GEM state.
   */
@@@ -2379,10 -2355,16 +2384,10 @@@ struct drm_ioctl_desc i915_ioctls[] = 
  
  int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
  
 -/**
 - * Determine if the device really is AGP or not.
 - *
 - * All Intel graphics chipsets are treated as AGP, even if they are really
 - * PCI-e.
 - *
 - * \param dev   The device to be tested.
 - *
 - * \returns
 - * A value of 1 is always retured to indictate every i9x5 is AGP.
 +/*
 + * This is really ugly: Because old userspace abused the linux agp interface to
 + * manage the gtt, we need to claim that all intel devices are agp.  For
 + * otherwise the drm core refuses to initialize the agp support code.
   */
  int i915_driver_device_is_agp(struct drm_device * dev)
  {
index 7bc4a40132ad2a124751c8296b4419bc1a86e7dd,0d1e4b7b4b99c9bb76460c2fca3ca3c5a6216b11..dd87937e921f16890674e2643e79cab420a8c83e
  static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
  static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
  static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 -static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
 -                                                        bool write);
 -static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 -                                                                uint64_t offset,
 -                                                                uint64_t size);
 -static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
  static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                                                    unsigned alignment,
                                                    bool map_and_fenceable);
 -static void i915_gem_clear_fence_reg(struct drm_device *dev,
 -                                   struct drm_i915_fence_reg *reg);
  static int i915_gem_phys_pwrite(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file);
  static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
  
 +static void i915_gem_write_fence(struct drm_device *dev, int reg,
 +                               struct drm_i915_gem_object *obj);
 +static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 +                                       struct drm_i915_fence_reg *fence,
 +                                       bool enable);
 +
  static int i915_gem_inactive_shrink(struct shrinker *shrinker,
                                    struct shrink_control *sc);
  static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
  
 +static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
 +{
 +      if (obj->tiling_mode)
 +              i915_gem_release_mmap(obj);
 +
 +      /* As we do not have an associated fence register, we will force
 +       * a tiling change if we ever need to acquire one.
 +       */
 +      obj->tiling_changed = false;
 +      obj->fence_reg = I915_FENCE_REG_NONE;
 +}
 +
  /* some bookkeeping */
  static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
@@@ -135,6 -125,25 +135,6 @@@ i915_gem_object_is_inactive(struct drm_
        return obj->gtt_space && !obj->active && obj->pin_count == 0;
  }
  
 -void i915_gem_do_init(struct drm_device *dev,
 -                    unsigned long start,
 -                    unsigned long mappable_end,
 -                    unsigned long end)
 -{
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -
 -      drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
 -
 -      dev_priv->mm.gtt_start = start;
 -      dev_priv->mm.gtt_mappable_end = mappable_end;
 -      dev_priv->mm.gtt_end = end;
 -      dev_priv->mm.gtt_total = end - start;
 -      dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 -
 -      /* Take over this portion of the GTT */
 -      intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 -}
 -
  int
  i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
            (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
                return -EINVAL;
  
 +      /* GEM with user mode setting was never supported on ilk and later. */
 +      if (INTEL_INFO(dev)->gen >= 5)
 +              return -ENODEV;
 +
        mutex_lock(&dev->struct_mutex);
 -      i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
 +      i915_gem_init_global_gtt(dev, args->gtt_start,
 +                               args->gtt_end, args->gtt_end);
        mutex_unlock(&dev->struct_mutex);
  
        return 0;
@@@ -255,6 -259,66 +255,6 @@@ static int i915_gem_object_needs_bit17_
                obj->tiling_mode != I915_TILING_NONE;
  }
  
 -/**
 - * This is the fast shmem pread path, which attempts to copy_from_user directly
 - * from the backing pages of the object to the user's address space.  On a
 - * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
 - */
 -static int
 -i915_gem_shmem_pread_fast(struct drm_device *dev,
 -                        struct drm_i915_gem_object *obj,
 -                        struct drm_i915_gem_pread *args,
 -                        struct drm_file *file)
 -{
 -      struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 -      ssize_t remain;
 -      loff_t offset;
 -      char __user *user_data;
 -      int page_offset, page_length;
 -
 -      user_data = (char __user *) (uintptr_t) args->data_ptr;
 -      remain = args->size;
 -
 -      offset = args->offset;
 -
 -      while (remain > 0) {
 -              struct page *page;
 -              char *vaddr;
 -              int ret;
 -
 -              /* Operation in this page
 -               *
 -               * page_offset = offset within page
 -               * page_length = bytes to copy for this page
 -               */
 -              page_offset = offset_in_page(offset);
 -              page_length = remain;
 -              if ((page_offset + remain) > PAGE_SIZE)
 -                      page_length = PAGE_SIZE - page_offset;
 -
 -              page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 -              if (IS_ERR(page))
 -                      return PTR_ERR(page);
 -
 -              vaddr = kmap_atomic(page);
 -              ret = __copy_to_user_inatomic(user_data,
 -                                            vaddr + page_offset,
 -                                            page_length);
 -              kunmap_atomic(vaddr);
 -
 -              mark_page_accessed(page);
 -              page_cache_release(page);
 -              if (ret)
 -                      return -EFAULT;
 -
 -              remain -= page_length;
 -              user_data += page_length;
 -              offset += page_length;
 -      }
 -
 -      return 0;
 -}
 -
  static inline int
  __copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
@@@ -307,121 -371,37 +307,121 @@@ __copy_from_user_swizzled(char __user *
        return 0;
  }
  
 -/**
 - * This is the fallback shmem pread path, which allocates temporary storage
 - * in kernel space to copy_to_user into outside of the struct_mutex, so we
 - * can copy out of the object's backing pages while holding the struct mutex
 - * and not take page faults.
 - */
 +/* Per-page copy function for the shmem pread fastpath.
 + * Flushes invalid cachelines before reading the target if
 + * needs_clflush is set. */
  static int
 -i915_gem_shmem_pread_slow(struct drm_device *dev,
 -                        struct drm_i915_gem_object *obj,
 -                        struct drm_i915_gem_pread *args,
 -                        struct drm_file *file)
 +shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
 +               char __user *user_data,
 +               bool page_do_bit17_swizzling, bool needs_clflush)
 +{
 +      char *vaddr;
 +      int ret;
 +
 +      if (unlikely(page_do_bit17_swizzling))
 +              return -EINVAL;
 +
 +      vaddr = kmap_atomic(page);
 +      if (needs_clflush)
 +              drm_clflush_virt_range(vaddr + shmem_page_offset,
 +                                     page_length);
 +      ret = __copy_to_user_inatomic(user_data,
 +                                    vaddr + shmem_page_offset,
 +                                    page_length);
 +      kunmap_atomic(vaddr);
 +
 +      return ret;
 +}
 +
 +static void
 +shmem_clflush_swizzled_range(char *addr, unsigned long length,
 +                           bool swizzled)
 +{
 +      if (unlikely(swizzled)) {
 +              unsigned long start = (unsigned long) addr;
 +              unsigned long end = (unsigned long) addr + length;
 +
 +              /* For swizzling simply ensure that we always flush both
 +               * channels. Lame, but simple and it works. Swizzled
 +               * pwrite/pread is far from a hotpath - current userspace
 +               * doesn't use it at all. */
 +              start = round_down(start, 128);
 +              end = round_up(end, 128);
 +
 +              drm_clflush_virt_range((void *)start, end - start);
 +      } else {
 +              drm_clflush_virt_range(addr, length);
 +      }
 +
 +}
 +
 +/* Only difference to the fast-path function is that this can handle bit17
 + * and uses non-atomic copy and kmap functions. */
 +static int
 +shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
 +               char __user *user_data,
 +               bool page_do_bit17_swizzling, bool needs_clflush)
 +{
 +      char *vaddr;
 +      int ret;
 +
 +      vaddr = kmap(page);
 +      if (needs_clflush)
 +              shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
 +                                           page_length,
 +                                           page_do_bit17_swizzling);
 +
 +      if (page_do_bit17_swizzling)
 +              ret = __copy_to_user_swizzled(user_data,
 +                                            vaddr, shmem_page_offset,
 +                                            page_length);
 +      else
 +              ret = __copy_to_user(user_data,
 +                                   vaddr + shmem_page_offset,
 +                                   page_length);
 +      kunmap(page);
 +
 +      return ret;
 +}
 +
 +static int
 +i915_gem_shmem_pread(struct drm_device *dev,
 +                   struct drm_i915_gem_object *obj,
 +                   struct drm_i915_gem_pread *args,
 +                   struct drm_file *file)
  {
        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        char __user *user_data;
        ssize_t remain;
        loff_t offset;
 -      int shmem_page_offset, page_length, ret;
 +      int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 +      int hit_slowpath = 0;
 +      int prefaulted = 0;
 +      int needs_clflush = 0;
 +      int release_page;
  
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
  
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  
 -      offset = args->offset;
 +      if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
 +              /* If we're not in the cpu read domain, set ourself into the gtt
 +               * read domain and manually flush cachelines (if required). This
 +               * optimizes for the case when the gpu will dirty the data
 +               * anyway again before the next pread happens. */
 +              if (obj->cache_level == I915_CACHE_NONE)
 +                      needs_clflush = 1;
 +              ret = i915_gem_object_set_to_gtt_domain(obj, false);
 +              if (ret)
 +                      return ret;
 +      }
  
 -      mutex_unlock(&dev->struct_mutex);
 +      offset = args->offset;
  
        while (remain > 0) {
                struct page *page;
 -              char *vaddr;
  
                /* Operation in this page
                 *
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
  
 -              page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 -              if (IS_ERR(page)) {
 -                      ret = PTR_ERR(page);
 -                      goto out;
 +              if (obj->pages) {
 +                      page = obj->pages[offset >> PAGE_SHIFT];
 +                      release_page = 0;
 +              } else {
 +                      page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 +                      if (IS_ERR(page)) {
 +                              ret = PTR_ERR(page);
 +                              goto out;
 +                      }
 +                      release_page = 1;
                }
  
                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;
  
 -              vaddr = kmap(page);
 -              if (page_do_bit17_swizzling)
 -                      ret = __copy_to_user_swizzled(user_data,
 -                                                    vaddr, shmem_page_offset,
 -                                                    page_length);
 -              else
 -                      ret = __copy_to_user(user_data,
 -                                           vaddr + shmem_page_offset,
 -                                           page_length);
 -              kunmap(page);
 +              ret = shmem_pread_fast(page, shmem_page_offset, page_length,
 +                                     user_data, page_do_bit17_swizzling,
 +                                     needs_clflush);
 +              if (ret == 0)
 +                      goto next_page;
  
 -              mark_page_accessed(page);
 +              hit_slowpath = 1;
 +              page_cache_get(page);
 +              mutex_unlock(&dev->struct_mutex);
 +
 +              if (!prefaulted) {
 +                      ret = fault_in_multipages_writeable(user_data, remain);
 +                      /* Userspace is tricking us, but we've already clobbered
 +                       * its pages with the prefault and promised to write the
 +                       * data up to the first fault. Hence ignore any errors
 +                       * and just continue. */
 +                      (void)ret;
 +                      prefaulted = 1;
 +              }
 +
 +              ret = shmem_pread_slow(page, shmem_page_offset, page_length,
 +                                     user_data, page_do_bit17_swizzling,
 +                                     needs_clflush);
 +
 +              mutex_lock(&dev->struct_mutex);
                page_cache_release(page);
 +next_page:
 +              mark_page_accessed(page);
 +              if (release_page)
 +                      page_cache_release(page);
  
                if (ret) {
                        ret = -EFAULT;
        }
  
  out:
 -      mutex_lock(&dev->struct_mutex);
 -      /* Fixup: Kill any reinstated backing storage pages */
 -      if (obj->madv == __I915_MADV_PURGED)
 -              i915_gem_object_truncate(obj);
 +      if (hit_slowpath) {
 +              /* Fixup: Kill any reinstated backing storage pages */
 +              if (obj->madv == __I915_MADV_PURGED)
 +                      i915_gem_object_truncate(obj);
 +      }
  
        return ret;
  }
@@@ -520,6 -476,11 +520,6 @@@ i915_gem_pread_ioctl(struct drm_device 
                       args->size))
                return -EFAULT;
  
 -      ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
 -                                     args->size);
 -      if (ret)
 -              return -EFAULT;
 -
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
  
        trace_i915_gem_object_pread(obj, args->offset, args->size);
  
 -      ret = i915_gem_object_set_cpu_read_domain_range(obj,
 -                                                      args->offset,
 -                                                      args->size);
 -      if (ret)
 -              goto out;
 -
 -      ret = -EFAULT;
 -      if (!i915_gem_object_needs_bit17_swizzle(obj))
 -              ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
 -      if (ret == -EFAULT)
 -              ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
 +      ret = i915_gem_shmem_pread(dev, obj, args, file);
  
  out:
        drm_gem_object_unreference(&obj->base);
@@@ -568,6 -539,30 +568,6 @@@ fast_user_write(struct io_mapping *mapp
        return unwritten;
  }
  
 -/* Here's the write path which can sleep for
 - * page faults
 - */
 -
 -static inline void
 -slow_kernel_write(struct io_mapping *mapping,
 -                loff_t gtt_base, int gtt_offset,
 -                struct page *user_page, int user_offset,
 -                int length)
 -{
 -      char __iomem *dst_vaddr;
 -      char *src_vaddr;
 -
 -      dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
 -      src_vaddr = kmap(user_page);
 -
 -      memcpy_toio(dst_vaddr + gtt_offset,
 -                  src_vaddr + user_offset,
 -                  length);
 -
 -      kunmap(user_page);
 -      io_mapping_unmap(dst_vaddr);
 -}
 -
  /**
   * This is the fast pwrite path, where we copy the data directly from the
   * user into the GTT, uncached.
@@@ -582,19 -577,7 +582,19 @@@ i915_gem_gtt_pwrite_fast(struct drm_dev
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
 -      int page_offset, page_length;
 +      int page_offset, page_length, ret;
 +
 +      ret = i915_gem_object_pin(obj, 0, true);
 +      if (ret)
 +              goto out;
 +
 +      ret = i915_gem_object_set_to_gtt_domain(obj, true);
 +      if (ret)
 +              goto out_unpin;
 +
 +      ret = i915_gem_object_put_fence(obj);
 +      if (ret)
 +              goto out_unpin;
  
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
                 * retry in the slow path.
                 */
                if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
 -                                  page_offset, user_data, page_length))
 -                      return -EFAULT;
 +                                  page_offset, user_data, page_length)) {
 +                      ret = -EFAULT;
 +                      goto out_unpin;
 +              }
  
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }
  
 -      return 0;
 +out_unpin:
 +      i915_gem_object_unpin(obj);
 +out:
 +      return ret;
  }
  
 -/**
 - * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 - * the memory and maps it using kmap_atomic for copying.
 - *
 - * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 - * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 - */
 +/* Per-page copy function for the shmem pwrite fastpath.
 + * Flushes invalid cachelines before writing to the target if
 + * needs_clflush_before is set and flushes out any written cachelines after
 + * writing if needs_clflush is set. */
  static int
 -i915_gem_gtt_pwrite_slow(struct drm_device *dev,
 -                       struct drm_i915_gem_object *obj,
 -                       struct drm_i915_gem_pwrite *args,
 -                       struct drm_file *file)
 +shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
 +                char __user *user_data,
 +                bool page_do_bit17_swizzling,
 +                bool needs_clflush_before,
 +                bool needs_clflush_after)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      ssize_t remain;
 -      loff_t gtt_page_base, offset;
 -      loff_t first_data_page, last_data_page, num_pages;
 -      loff_t pinned_pages, i;
 -      struct page **user_pages;
 -      struct mm_struct *mm = current->mm;
 -      int gtt_page_offset, data_page_offset, data_page_index, page_length;
 +      char *vaddr;
        int ret;
 -      uint64_t data_ptr = args->data_ptr;
 -
 -      remain = args->size;
 -
 -      /* Pin the user pages containing the data.  We can't fault while
 -       * holding the struct mutex, and all of the pwrite implementations
 -       * want to hold it while dereferencing the user data.
 -       */
 -      first_data_page = data_ptr / PAGE_SIZE;
 -      last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 -      num_pages = last_data_page - first_data_page + 1;
 -
 -      user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
 -      if (user_pages == NULL)
 -              return -ENOMEM;
 -
 -      mutex_unlock(&dev->struct_mutex);
 -      down_read(&mm->mmap_sem);
 -      pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
 -                                    num_pages, 0, 0, user_pages, NULL);
 -      up_read(&mm->mmap_sem);
 -      mutex_lock(&dev->struct_mutex);
 -      if (pinned_pages < num_pages) {
 -              ret = -EFAULT;
 -              goto out_unpin_pages;
 -      }
 -
 -      ret = i915_gem_object_set_to_gtt_domain(obj, true);
 -      if (ret)
 -              goto out_unpin_pages;
 -
 -      ret = i915_gem_object_put_fence(obj);
 -      if (ret)
 -              goto out_unpin_pages;
 -
 -      offset = obj->gtt_offset + args->offset;
 -
 -      while (remain > 0) {
 -              /* Operation in this page
 -               *
 -               * gtt_page_base = page offset within aperture
 -               * gtt_page_offset = offset within page in aperture
 -               * data_page_index = page number in get_user_pages return
 -               * data_page_offset = offset with data_page_index page.
 -               * page_length = bytes to copy for this page
 -               */
 -              gtt_page_base = offset & PAGE_MASK;
 -              gtt_page_offset = offset_in_page(offset);
 -              data_page_index = data_ptr / PAGE_SIZE - first_data_page;
 -              data_page_offset = offset_in_page(data_ptr);
 -
 -              page_length = remain;
 -              if ((gtt_page_offset + page_length) > PAGE_SIZE)
 -                      page_length = PAGE_SIZE - gtt_page_offset;
 -              if ((data_page_offset + page_length) > PAGE_SIZE)
 -                      page_length = PAGE_SIZE - data_page_offset;
  
 -              slow_kernel_write(dev_priv->mm.gtt_mapping,
 -                                gtt_page_base, gtt_page_offset,
 -                                user_pages[data_page_index],
 -                                data_page_offset,
 -                                page_length);
 -
 -              remain -= page_length;
 -              offset += page_length;
 -              data_ptr += page_length;
 -      }
 +      if (unlikely(page_do_bit17_swizzling))
 +              return -EINVAL;
  
 -out_unpin_pages:
 -      for (i = 0; i < pinned_pages; i++)
 -              page_cache_release(user_pages[i]);
 -      drm_free_large(user_pages);
 +      vaddr = kmap_atomic(page);
 +      if (needs_clflush_before)
 +              drm_clflush_virt_range(vaddr + shmem_page_offset,
 +                                     page_length);
 +      ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
 +                                              user_data,
 +                                              page_length);
 +      if (needs_clflush_after)
 +              drm_clflush_virt_range(vaddr + shmem_page_offset,
 +                                     page_length);
 +      kunmap_atomic(vaddr);
  
        return ret;
  }
  
 -/**
 - * This is the fast shmem pwrite path, which attempts to directly
 - * copy_from_user into the kmapped pages backing the object.
 - */
 +/* Only difference to the fast-path function is that this can handle bit17
 + * and uses non-atomic copy and kmap functions. */
  static int
 -i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 -                         struct drm_i915_gem_object *obj,
 -                         struct drm_i915_gem_pwrite *args,
 -                         struct drm_file *file)
 +shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
 +                char __user *user_data,
 +                bool page_do_bit17_swizzling,
 +                bool needs_clflush_before,
 +                bool needs_clflush_after)
  {
 -      struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 -      ssize_t remain;
 -      loff_t offset;
 -      char __user *user_data;
 -      int page_offset, page_length;
 -
 -      user_data = (char __user *) (uintptr_t) args->data_ptr;
 -      remain = args->size;
 -
 -      offset = args->offset;
 -      obj->dirty = 1;
 -
 -      while (remain > 0) {
 -              struct page *page;
 -              char *vaddr;
 -              int ret;
 -
 -              /* Operation in this page
 -               *
 -               * page_offset = offset within page
 -               * page_length = bytes to copy for this page
 -               */
 -              page_offset = offset_in_page(offset);
 -              page_length = remain;
 -              if ((page_offset + remain) > PAGE_SIZE)
 -                      page_length = PAGE_SIZE - page_offset;
 -
 -              page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 -              if (IS_ERR(page))
 -                      return PTR_ERR(page);
 +      char *vaddr;
 +      int ret;
  
 -              vaddr = kmap_atomic(page);
 -              ret = __copy_from_user_inatomic(vaddr + page_offset,
 +      vaddr = kmap(page);
 +      if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
 +              shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
 +                                           page_length,
 +                                           page_do_bit17_swizzling);
 +      if (page_do_bit17_swizzling)
 +              ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
                                                user_data,
                                                page_length);
 -              kunmap_atomic(vaddr);
 -
 -              set_page_dirty(page);
 -              mark_page_accessed(page);
 -              page_cache_release(page);
 -
 -              /* If we get a fault while copying data, then (presumably) our
 -               * source page isn't available.  Return the error and we'll
 -               * retry in the slow path.
 -               */
 -              if (ret)
 -                      return -EFAULT;
 -
 -              remain -= page_length;
 -              user_data += page_length;
 -              offset += page_length;
 -      }
 +      else
 +              ret = __copy_from_user(vaddr + shmem_page_offset,
 +                                     user_data,
 +                                     page_length);
 +      if (needs_clflush_after)
 +              shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
 +                                           page_length,
 +                                           page_do_bit17_swizzling);
 +      kunmap(page);
  
 -      return 0;
 +      return ret;
  }
  
 -/**
 - * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 - * the memory and maps it using kmap_atomic for copying.
 - *
 - * This avoids taking mmap_sem for faulting on the user's address while the
 - * struct_mutex is held.
 - */
  static int
 -i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 -                         struct drm_i915_gem_object *obj,
 -                         struct drm_i915_gem_pwrite *args,
 -                         struct drm_file *file)
 +i915_gem_shmem_pwrite(struct drm_device *dev,
 +                    struct drm_i915_gem_object *obj,
 +                    struct drm_i915_gem_pwrite *args,
 +                    struct drm_file *file)
  {
        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        ssize_t remain;
        loff_t offset;
        char __user *user_data;
 -      int shmem_page_offset, page_length, ret;
 +      int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 +      int hit_slowpath = 0;
 +      int needs_clflush_after = 0;
 +      int needs_clflush_before = 0;
 +      int release_page;
  
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
  
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  
 +      if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 +              /* If we're not in the cpu write domain, set ourself into the gtt
 +               * write domain and manually flush cachelines (if required). This
 +               * optimizes for the case when the gpu will use the data
 +               * right away and we therefore have to clflush anyway. */
 +              if (obj->cache_level == I915_CACHE_NONE)
 +                      needs_clflush_after = 1;
 +              ret = i915_gem_object_set_to_gtt_domain(obj, true);
 +              if (ret)
 +                      return ret;
 +      }
 +      /* Same trick applies for invalidate partially written cachelines before
 +       * writing.  */
 +      if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
 +          && obj->cache_level == I915_CACHE_NONE)
 +              needs_clflush_before = 1;
 +
        offset = args->offset;
        obj->dirty = 1;
  
 -      mutex_unlock(&dev->struct_mutex);
 -
        while (remain > 0) {
                struct page *page;
 -              char *vaddr;
 +              int partial_cacheline_write;
  
                /* Operation in this page
                 *
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;
  
 -              page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 -              if (IS_ERR(page)) {
 -                      ret = PTR_ERR(page);
 -                      goto out;
 +              /* If we don't overwrite a cacheline completely we need to be
 +               * careful to have up-to-date data by first clflushing. Don't
 +               * overcomplicate things and flush the entire patch. */
 +              partial_cacheline_write = needs_clflush_before &&
 +                      ((shmem_page_offset | page_length)
 +                              & (boot_cpu_data.x86_clflush_size - 1));
 +
 +              if (obj->pages) {
 +                      page = obj->pages[offset >> PAGE_SHIFT];
 +                      release_page = 0;
 +              } else {
 +                      page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 +                      if (IS_ERR(page)) {
 +                              ret = PTR_ERR(page);
 +                              goto out;
 +                      }
 +                      release_page = 1;
                }
  
                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;
  
 -              vaddr = kmap(page);
 -              if (page_do_bit17_swizzling)
 -                      ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
 -                                                      user_data,
 -                                                      page_length);
 -              else
 -                      ret = __copy_from_user(vaddr + shmem_page_offset,
 -                                             user_data,
 -                                             page_length);
 -              kunmap(page);
 +              ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
 +                                      user_data, page_do_bit17_swizzling,
 +                                      partial_cacheline_write,
 +                                      needs_clflush_after);
 +              if (ret == 0)
 +                      goto next_page;
 +
 +              hit_slowpath = 1;
 +              page_cache_get(page);
 +              mutex_unlock(&dev->struct_mutex);
  
 +              ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
 +                                      user_data, page_do_bit17_swizzling,
 +                                      partial_cacheline_write,
 +                                      needs_clflush_after);
 +
 +              mutex_lock(&dev->struct_mutex);
 +              page_cache_release(page);
 +next_page:
                set_page_dirty(page);
                mark_page_accessed(page);
 -              page_cache_release(page);
 +              if (release_page)
 +                      page_cache_release(page);
  
                if (ret) {
                        ret = -EFAULT;
        }
  
  out:
 -      mutex_lock(&dev->struct_mutex);
 -      /* Fixup: Kill any reinstated backing storage pages */
 -      if (obj->madv == __I915_MADV_PURGED)
 -              i915_gem_object_truncate(obj);
 -      /* and flush dirty cachelines in case the object isn't in the cpu write
 -       * domain anymore. */
 -      if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 -              i915_gem_clflush_object(obj);
 -              intel_gtt_chipset_flush();
 +      if (hit_slowpath) {
 +              /* Fixup: Kill any reinstated backing storage pages */
 +              if (obj->madv == __I915_MADV_PURGED)
 +                      i915_gem_object_truncate(obj);
 +              /* and flush dirty cachelines in case the object isn't in the cpu write
 +               * domain anymore. */
 +              if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 +                      i915_gem_clflush_object(obj);
 +                      intel_gtt_chipset_flush();
 +              }
        }
  
 +      if (needs_clflush_after)
 +              intel_gtt_chipset_flush();
 +
        return ret;
  }
  
@@@ -854,8 -892,8 +854,8 @@@ i915_gem_pwrite_ioctl(struct drm_devic
                       args->size))
                return -EFAULT;
  
 -      ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
 -                                    args->size);
 +      ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
 +                                         args->size);
        if (ret)
                return -EFAULT;
  
  
        trace_i915_gem_object_pwrite(obj, args->offset, args->size);
  
 +      ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
        if (obj->phys_obj) {
                ret = i915_gem_phys_pwrite(dev, obj, args, file);
                goto out;
 -      }
 -
 -      if (obj->gtt_space &&
 -          obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 -              ret = i915_gem_object_pin(obj, 0, true);
 -              if (ret)
 -                      goto out;
 -
 -              ret = i915_gem_object_set_to_gtt_domain(obj, true);
 -              if (ret)
 -                      goto out_unpin;
 -
 -              ret = i915_gem_object_put_fence(obj);
 -              if (ret)
 -                      goto out_unpin;
 +      }
  
 +      if (obj->gtt_space &&
 +          obj->cache_level == I915_CACHE_NONE &&
 +          obj->tiling_mode == I915_TILING_NONE &&
 +          obj->map_and_fenceable &&
 +          obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
 -              if (ret == -EFAULT)
 -                      ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
 -
 -out_unpin:
 -              i915_gem_object_unpin(obj);
 -
 -              if (ret != -EFAULT)
 -                      goto out;
 -              /* Fall through to the shmfs paths because the gtt paths might
 -               * fail with non-page-backed user pointers (e.g. gtt mappings
 -               * when moving data between textures). */
 +              /* Note that the gtt paths might fail with non-page-backed user
 +               * pointers (e.g. gtt mappings when moving data between
 +               * textures). Fallback to the shmem path in that case. */
        }
  
 -      ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 -      if (ret)
 -              goto out;
 -
 -      ret = -EFAULT;
 -      if (!i915_gem_object_needs_bit17_swizzle(obj))
 -              ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
        if (ret == -EFAULT)
 -              ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
 +              ret = i915_gem_shmem_pwrite(dev, obj, args, file);
  
  out:
        drm_gem_object_unreference(&obj->base);
@@@ -1026,11 -1087,9 +1026,9 @@@ i915_gem_mmap_ioctl(struct drm_device *
        if (obj == NULL)
                return -ENOENT;
  
-       down_write(&current->mm->mmap_sem);
-       addr = do_mmap(obj->filp, 0, args->size,
+       addr = vm_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
-       up_write(&current->mm->mmap_sem);
        drm_gem_object_unreference_unlocked(obj);
        if (IS_ERR((void *)addr))
                return addr;
@@@ -1092,10 -1151,10 +1090,10 @@@ int i915_gem_fault(struct vm_area_struc
                        goto unlock;
        }
  
 -      if (obj->tiling_mode == I915_TILING_NONE)
 -              ret = i915_gem_object_put_fence(obj);
 -      else
 -              ret = i915_gem_object_get_fence(obj, NULL);
 +      if (!obj->has_global_gtt_mapping)
 +              i915_gem_gtt_bind_object(obj, obj->cache_level);
 +
 +      ret = i915_gem_object_get_fence(obj);
        if (ret)
                goto unlock;
  
@@@ -1414,6 -1473,7 +1412,6 @@@ i915_gem_object_move_to_active(struct d
  
        if (obj->fenced_gpu_access) {
                obj->last_fenced_seqno = seqno;
 -              obj->last_fenced_ring = ring;
  
                /* Bump MRU to take account of the delayed flush */
                if (obj->fence_reg != I915_FENCE_REG_NONE) {
@@@ -1460,6 -1520,7 +1458,6 @@@ i915_gem_object_move_to_inactive(struc
        BUG_ON(!list_empty(&obj->gpu_write_list));
        BUG_ON(!obj->active);
        obj->ring = NULL;
 -      obj->last_fenced_ring = NULL;
  
        i915_gem_object_move_off_active(obj);
        obj->fenced_gpu_access = false;
@@@ -1485,9 -1546,6 +1483,9 @@@ i915_gem_object_truncate(struct drm_i91
        inode = obj->base.filp->f_path.dentry->d_inode;
        shmem_truncate_range(inode, 0, (loff_t)-1);
  
 +      if (obj->base.map_list.map)
 +              drm_gem_free_mmap_offset(&obj->base);
 +
        obj->madv = __I915_MADV_PURGED;
  }
  
@@@ -1653,18 -1711,20 +1651,18 @@@ static void i915_gem_reset_fences(struc
  
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 -              struct drm_i915_gem_object *obj = reg->obj;
  
 -              if (!obj)
 -                      continue;
 +              i915_gem_write_fence(dev, i, NULL);
  
 -              if (obj->tiling_mode)
 -                      i915_gem_release_mmap(obj);
 +              if (reg->obj)
 +                      i915_gem_object_fence_lost(reg->obj);
  
 -              reg->obj->fence_reg = I915_FENCE_REG_NONE;
 -              reg->obj->fenced_gpu_access = false;
 -              reg->obj->last_fenced_seqno = 0;
 -              reg->obj->last_fenced_ring = NULL;
 -              i915_gem_clear_fence_reg(dev, reg);
 +              reg->pin_count = 0;
 +              reg->obj = NULL;
 +              INIT_LIST_HEAD(&reg->lru_list);
        }
 +
 +      INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  }
  
  void i915_gem_reset(struct drm_device *dev)
@@@ -1894,8 -1954,6 +1892,8 @@@ i915_wait_request(struct intel_ring_buf
        if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
                if (HAS_PCH_SPLIT(ring->dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
 +              else if (IS_VALLEYVIEW(ring->dev))
 +                      ier = I915_READ(GTIER) | I915_READ(VLV_IER);
                else
                        ier = I915_READ(IER);
                if (!ier) {
@@@ -1968,62 -2026,6 +1966,62 @@@ i915_gem_object_wait_rendering(struct d
        return 0;
  }
  
 +/**
 + * i915_gem_object_sync - sync an object to a ring.
 + *
 + * @obj: object which may be in use on another ring.
 + * @to: ring we wish to use the object on. May be NULL.
 + *
 + * This code is meant to abstract object synchronization with the GPU.
 + * Calling with NULL implies synchronizing the object with the CPU
 + * rather than a particular GPU ring.
 + *
 + * Returns 0 if successful, else propagates up the lower layer error.
 + */
 +int
 +i915_gem_object_sync(struct drm_i915_gem_object *obj,
 +                   struct intel_ring_buffer *to)
 +{
 +      struct intel_ring_buffer *from = obj->ring;
 +      u32 seqno;
 +      int ret, idx;
 +
 +      if (from == NULL || to == from)
 +              return 0;
 +
 +      if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
 +              return i915_gem_object_wait_rendering(obj);
 +
 +      idx = intel_ring_sync_index(from, to);
 +
 +      seqno = obj->last_rendering_seqno;
 +      if (seqno <= from->sync_seqno[idx])
 +              return 0;
 +
 +      if (seqno == from->outstanding_lazy_request) {
 +              struct drm_i915_gem_request *request;
 +
 +              request = kzalloc(sizeof(*request), GFP_KERNEL);
 +              if (request == NULL)
 +                      return -ENOMEM;
 +
 +              ret = i915_add_request(from, NULL, request);
 +              if (ret) {
 +                      kfree(request);
 +                      return ret;
 +              }
 +
 +              seqno = request->seqno;
 +      }
 +
 +
 +      ret = to->sync_to(to, from, seqno);
 +      if (!ret)
 +              from->sync_seqno[idx] = seqno;
 +
 +      return ret;
 +}
 +
  static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
  {
        u32 old_write_domain, old_read_domains;
@@@ -2098,13 -2100,11 +2096,13 @@@ i915_gem_object_unbind(struct drm_i915_
  
        trace_i915_gem_object_unbind(obj);
  
 -      i915_gem_gtt_unbind_object(obj);
 +      if (obj->has_global_gtt_mapping)
 +              i915_gem_gtt_unbind_object(obj);
        if (obj->has_aliasing_ppgtt_mapping) {
                i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
                obj->has_aliasing_ppgtt_mapping = 0;
        }
 +      i915_gem_gtt_finish_object(obj);
  
        i915_gem_object_put_pages_gtt(obj);
  
@@@ -2178,178 -2178,189 +2176,178 @@@ int i915_gpu_idle(struct drm_device *de
        return 0;
  }
  
 -static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
 -                                     struct intel_ring_buffer *pipelined)
 +static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
 +                                      struct drm_i915_gem_object *obj)
  {
 -      struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
 -      u32 size = obj->gtt_space->size;
 -      int regnum = obj->fence_reg;
        uint64_t val;
  
 -      val = (uint64_t)((obj->gtt_offset + size - 4096) &
 -                       0xfffff000) << 32;
 -      val |= obj->gtt_offset & 0xfffff000;
 -      val |= (uint64_t)((obj->stride / 128) - 1) <<
 -              SANDYBRIDGE_FENCE_PITCH_SHIFT;
 +      if (obj) {
 +              u32 size = obj->gtt_space->size;
  
 -      if (obj->tiling_mode == I915_TILING_Y)
 -              val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 -      val |= I965_FENCE_REG_VALID;
 -
 -      if (pipelined) {
 -              int ret = intel_ring_begin(pipelined, 6);
 -              if (ret)
 -                      return ret;
 +              val = (uint64_t)((obj->gtt_offset + size - 4096) &
 +                               0xfffff000) << 32;
 +              val |= obj->gtt_offset & 0xfffff000;
 +              val |= (uint64_t)((obj->stride / 128) - 1) <<
 +                      SANDYBRIDGE_FENCE_PITCH_SHIFT;
  
 -              intel_ring_emit(pipelined, MI_NOOP);
 -              intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
 -              intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
 -              intel_ring_emit(pipelined, (u32)val);
 -              intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
 -              intel_ring_emit(pipelined, (u32)(val >> 32));
 -              intel_ring_advance(pipelined);
 +              if (obj->tiling_mode == I915_TILING_Y)
 +                      val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 +              val |= I965_FENCE_REG_VALID;
        } else
 -              I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
 +              val = 0;
  
 -      return 0;
 +      I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
 +      POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
  }
  
 -static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
 -                              struct intel_ring_buffer *pipelined)
 +static void i965_write_fence_reg(struct drm_device *dev, int reg,
 +                               struct drm_i915_gem_object *obj)
  {
 -      struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
 -      u32 size = obj->gtt_space->size;
 -      int regnum = obj->fence_reg;
        uint64_t val;
  
 -      val = (uint64_t)((obj->gtt_offset + size - 4096) &
 -                  0xfffff000) << 32;
 -      val |= obj->gtt_offset & 0xfffff000;
 -      val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
 -      if (obj->tiling_mode == I915_TILING_Y)
 -              val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 -      val |= I965_FENCE_REG_VALID;
 -
 -      if (pipelined) {
 -              int ret = intel_ring_begin(pipelined, 6);
 -              if (ret)
 -                      return ret;
 +      if (obj) {
 +              u32 size = obj->gtt_space->size;
  
 -              intel_ring_emit(pipelined, MI_NOOP);
 -              intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
 -              intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
 -              intel_ring_emit(pipelined, (u32)val);
 -              intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
 -              intel_ring_emit(pipelined, (u32)(val >> 32));
 -              intel_ring_advance(pipelined);
 +              val = (uint64_t)((obj->gtt_offset + size - 4096) &
 +                               0xfffff000) << 32;
 +              val |= obj->gtt_offset & 0xfffff000;
 +              val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
 +              if (obj->tiling_mode == I915_TILING_Y)
 +                      val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 +              val |= I965_FENCE_REG_VALID;
        } else
 -              I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
 +              val = 0;
  
 -      return 0;
 +      I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
 +      POSTING_READ(FENCE_REG_965_0 + reg * 8);
  }
  
 -static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
 -                              struct intel_ring_buffer *pipelined)
 +static void i915_write_fence_reg(struct drm_device *dev, int reg,
 +                               struct drm_i915_gem_object *obj)
  {
 -      struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
 -      u32 size = obj->gtt_space->size;
 -      u32 fence_reg, val, pitch_val;
 -      int tile_width;
 -
 -      if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
 -               (size & -size) != size ||
 -               (obj->gtt_offset & (size - 1)),
 -               "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
 -               obj->gtt_offset, obj->map_and_fenceable, size))
 -              return -EINVAL;
 +      u32 val;
  
 -      if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
 -              tile_width = 128;
 -      else
 -              tile_width = 512;
 -
 -      /* Note: pitch better be a power of two tile widths */
 -      pitch_val = obj->stride / tile_width;
 -      pitch_val = ffs(pitch_val) - 1;
 -
 -      val = obj->gtt_offset;
 -      if (obj->tiling_mode == I915_TILING_Y)
 -              val |= 1 << I830_FENCE_TILING_Y_SHIFT;
 -      val |= I915_FENCE_SIZE_BITS(size);
 -      val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 -      val |= I830_FENCE_REG_VALID;
 -
 -      fence_reg = obj->fence_reg;
 -      if (fence_reg < 8)
 -              fence_reg = FENCE_REG_830_0 + fence_reg * 4;
 -      else
 -              fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
 +      if (obj) {
 +              u32 size = obj->gtt_space->size;
 +              int pitch_val;
 +              int tile_width;
  
 -      if (pipelined) {
 -              int ret = intel_ring_begin(pipelined, 4);
 -              if (ret)
 -                      return ret;
 +              WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
 +                   (size & -size) != size ||
 +                   (obj->gtt_offset & (size - 1)),
 +                   "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
 +                   obj->gtt_offset, obj->map_and_fenceable, size);
  
 -              intel_ring_emit(pipelined, MI_NOOP);
 -              intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
 -              intel_ring_emit(pipelined, fence_reg);
 -              intel_ring_emit(pipelined, val);
 -              intel_ring_advance(pipelined);
 +              if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
 +                      tile_width = 128;
 +              else
 +                      tile_width = 512;
 +
 +              /* Note: pitch better be a power of two tile widths */
 +              pitch_val = obj->stride / tile_width;
 +              pitch_val = ffs(pitch_val) - 1;
 +
 +              val = obj->gtt_offset;
 +              if (obj->tiling_mode == I915_TILING_Y)
 +                      val |= 1 << I830_FENCE_TILING_Y_SHIFT;
 +              val |= I915_FENCE_SIZE_BITS(size);
 +              val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 +              val |= I830_FENCE_REG_VALID;
        } else
 -              I915_WRITE(fence_reg, val);
 +              val = 0;
  
 -      return 0;
 +      if (reg < 8)
 +              reg = FENCE_REG_830_0 + reg * 4;
 +      else
 +              reg = FENCE_REG_945_8 + (reg - 8) * 4;
 +
 +      I915_WRITE(reg, val);
 +      POSTING_READ(reg);
  }
  
 -static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
 -                              struct intel_ring_buffer *pipelined)
 +static void i830_write_fence_reg(struct drm_device *dev, int reg,
 +                              struct drm_i915_gem_object *obj)
  {
 -      struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
 -      u32 size = obj->gtt_space->size;
 -      int regnum = obj->fence_reg;
        uint32_t val;
 -      uint32_t pitch_val;
 -
 -      if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
 -               (size & -size) != size ||
 -               (obj->gtt_offset & (size - 1)),
 -               "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
 -               obj->gtt_offset, size))
 -              return -EINVAL;
  
 -      pitch_val = obj->stride / 128;
 -      pitch_val = ffs(pitch_val) - 1;
 -
 -      val = obj->gtt_offset;
 -      if (obj->tiling_mode == I915_TILING_Y)
 -              val |= 1 << I830_FENCE_TILING_Y_SHIFT;
 -      val |= I830_FENCE_SIZE_BITS(size);
 -      val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 -      val |= I830_FENCE_REG_VALID;
 +      if (obj) {
 +              u32 size = obj->gtt_space->size;
 +              uint32_t pitch_val;
 +
 +              WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
 +                   (size & -size) != size ||
 +                   (obj->gtt_offset & (size - 1)),
 +                   "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
 +                   obj->gtt_offset, size);
 +
 +              pitch_val = obj->stride / 128;
 +              pitch_val = ffs(pitch_val) - 1;
 +
 +              val = obj->gtt_offset;
 +              if (obj->tiling_mode == I915_TILING_Y)
 +                      val |= 1 << I830_FENCE_TILING_Y_SHIFT;
 +              val |= I830_FENCE_SIZE_BITS(size);
 +              val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 +              val |= I830_FENCE_REG_VALID;
 +      } else
 +              val = 0;
  
 -      if (pipelined) {
 -              int ret = intel_ring_begin(pipelined, 4);
 -              if (ret)
 -                      return ret;
 +      I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
 +      POSTING_READ(FENCE_REG_830_0 + reg * 4);
 +}
  
 -              intel_ring_emit(pipelined, MI_NOOP);
 -              intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
 -              intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
 -              intel_ring_emit(pipelined, val);
 -              intel_ring_advance(pipelined);
 -      } else
 -              I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
 +static void i915_gem_write_fence(struct drm_device *dev, int reg,
 +                               struct drm_i915_gem_object *obj)
 +{
 +      switch (INTEL_INFO(dev)->gen) {
 +      case 7:
 +      case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
 +      case 5:
 +      case 4: i965_write_fence_reg(dev, reg, obj); break;
 +      case 3: i915_write_fence_reg(dev, reg, obj); break;
 +      case 2: i830_write_fence_reg(dev, reg, obj); break;
 +      default: break;
 +      }
 +}
  
 -      return 0;
 +static inline int fence_number(struct drm_i915_private *dev_priv,
 +                             struct drm_i915_fence_reg *fence)
 +{
 +      return fence - dev_priv->fence_regs;
  }
  
 -static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
 +static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 +                                       struct drm_i915_fence_reg *fence,
 +                                       bool enable)
  {
 -      return i915_seqno_passed(ring->get_seqno(ring), seqno);
 +      struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 +      int reg = fence_number(dev_priv, fence);
 +
 +      i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
 +
 +      if (enable) {
 +              obj->fence_reg = reg;
 +              fence->obj = obj;
 +              list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
 +      } else {
 +              obj->fence_reg = I915_FENCE_REG_NONE;
 +              fence->obj = NULL;
 +              list_del_init(&fence->lru_list);
 +      }
  }
  
  static int
 -i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 -                          struct intel_ring_buffer *pipelined)
 +i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
  {
        int ret;
  
        if (obj->fenced_gpu_access) {
                if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
 -                      ret = i915_gem_flush_ring(obj->last_fenced_ring,
 +                      ret = i915_gem_flush_ring(obj->ring,
                                                  0, obj->base.write_domain);
                        if (ret)
                                return ret;
                obj->fenced_gpu_access = false;
        }
  
 -      if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
 -              if (!ring_passed_seqno(obj->last_fenced_ring,
 -                                     obj->last_fenced_seqno)) {
 -                      ret = i915_wait_request(obj->last_fenced_ring,
 -                                              obj->last_fenced_seqno,
 -                                              true);
 -                      if (ret)
 -                              return ret;
 -              }
 +      if (obj->last_fenced_seqno) {
 +              ret = i915_wait_request(obj->ring,
 +                                      obj->last_fenced_seqno,
 +                                      false);
 +              if (ret)
 +                      return ret;
  
                obj->last_fenced_seqno = 0;
 -              obj->last_fenced_ring = NULL;
        }
  
        /* Ensure that all CPU reads are completed before installing a fence
  int
  i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
  {
 +      struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int ret;
  
 -      if (obj->tiling_mode)
 -              i915_gem_release_mmap(obj);
 -
 -      ret = i915_gem_object_flush_fence(obj, NULL);
 +      ret = i915_gem_object_flush_fence(obj);
        if (ret)
                return ret;
  
 -      if (obj->fence_reg != I915_FENCE_REG_NONE) {
 -              struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 -
 -              WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
 -              i915_gem_clear_fence_reg(obj->base.dev,
 -                                       &dev_priv->fence_regs[obj->fence_reg]);
 +      if (obj->fence_reg == I915_FENCE_REG_NONE)
 +              return 0;
  
 -              obj->fence_reg = I915_FENCE_REG_NONE;
 -      }
 +      i915_gem_object_update_fence(obj,
 +                                   &dev_priv->fence_regs[obj->fence_reg],
 +                                   false);
 +      i915_gem_object_fence_lost(obj);
  
        return 0;
  }
  
  static struct drm_i915_fence_reg *
 -i915_find_fence_reg(struct drm_device *dev,
 -                  struct intel_ring_buffer *pipelined)
 +i915_find_fence_reg(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_i915_fence_reg *reg, *first, *avail;
 +      struct drm_i915_fence_reg *reg, *avail;
        int i;
  
        /* First try to find a free reg */
                return NULL;
  
        /* None available, try to steal one or wait for a user to finish */
 -      avail = first = NULL;
        list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
                if (reg->pin_count)
                        continue;
  
 -              if (first == NULL)
 -                      first = reg;
 -
 -              if (!pipelined ||
 -                  !reg->obj->last_fenced_ring ||
 -                  reg->obj->last_fenced_ring == pipelined) {
 -                      avail = reg;
 -                      break;
 -              }
 +              return reg;
        }
  
 -      if (avail == NULL)
 -              avail = first;
 -
 -      return avail;
 +      return NULL;
  }
  
  /**
 - * i915_gem_object_get_fence - set up a fence reg for an object
 + * i915_gem_object_get_fence - set up fencing for an object
   * @obj: object to map through a fence reg
 - * @pipelined: ring on which to queue the change, or NULL for CPU access
 - * @interruptible: must we wait uninterruptibly for the register to retire?
   *
   * When mapping objects through the GTT, userspace wants to be able to write
   * to them without having to worry about swizzling if the object is tiled.
 - *
   * This function walks the fence regs looking for a free one for @obj,
   * stealing one if it can't find any.
   *
   * It then sets up the reg based on the object's properties: address, pitch
   * and tiling format.
 + *
 + * For an untiled surface, this removes any existing fence.
   */
  int
 -i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 -                        struct intel_ring_buffer *pipelined)
 +i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
  {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 +      bool enable = obj->tiling_mode != I915_TILING_NONE;
        struct drm_i915_fence_reg *reg;
        int ret;
  
 -      /* XXX disable pipelining. There are bugs. Shocking. */
 -      pipelined = NULL;
 +      /* Have we updated the tiling parameters upon the object and so
 +       * will need to serialise the write to the associated fence register?
 +       */
 +      if (obj->tiling_changed) {
 +              ret = i915_gem_object_flush_fence(obj);
 +              if (ret)
 +                      return ret;
 +      }
  
        /* Just update our place in the LRU if our fence is getting reused. */
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                reg = &dev_priv->fence_regs[obj->fence_reg];
 -              list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 -
 -              if (obj->tiling_changed) {
 -                      ret = i915_gem_object_flush_fence(obj, pipelined);
 -                      if (ret)
 -                              return ret;
 -
 -                      if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
 -                              pipelined = NULL;
 -
 -                      if (pipelined) {
 -                              reg->setup_seqno =
 -                                      i915_gem_next_request_seqno(pipelined);
 -                              obj->last_fenced_seqno = reg->setup_seqno;
 -                              obj->last_fenced_ring = pipelined;
 -                      }
 -
 -                      goto update;
 +              if (!obj->tiling_changed) {
 +                      list_move_tail(&reg->lru_list,
 +                                     &dev_priv->mm.fence_list);
 +                      return 0;
                }
 +      } else if (enable) {
 +              reg = i915_find_fence_reg(dev);
 +              if (reg == NULL)
 +                      return -EDEADLK;
  
 -              if (!pipelined) {
 -                      if (reg->setup_seqno) {
 -                              if (!ring_passed_seqno(obj->last_fenced_ring,
 -                                                     reg->setup_seqno)) {
 -                                      ret = i915_wait_request(obj->last_fenced_ring,
 -                                                              reg->setup_seqno,
 -                                                              true);
 -                                      if (ret)
 -                                              return ret;
 -                              }
 +              if (reg->obj) {
 +                      struct drm_i915_gem_object *old = reg->obj;
  
 -                              reg->setup_seqno = 0;
 -                      }
 -              } else if (obj->last_fenced_ring &&
 -                         obj->last_fenced_ring != pipelined) {
 -                      ret = i915_gem_object_flush_fence(obj, pipelined);
 +                      ret = i915_gem_object_flush_fence(old);
                        if (ret)
                                return ret;
 -              }
 -
 -              return 0;
 -      }
  
 -      reg = i915_find_fence_reg(dev, pipelined);
 -      if (reg == NULL)
 -              return -EDEADLK;
 -
 -      ret = i915_gem_object_flush_fence(obj, pipelined);
 -      if (ret)
 -              return ret;
 -
 -      if (reg->obj) {
 -              struct drm_i915_gem_object *old = reg->obj;
 -
 -              drm_gem_object_reference(&old->base);
 -
 -              if (old->tiling_mode)
 -                      i915_gem_release_mmap(old);
 -
 -              ret = i915_gem_object_flush_fence(old, pipelined);
 -              if (ret) {
 -                      drm_gem_object_unreference(&old->base);
 -                      return ret;
 +                      i915_gem_object_fence_lost(old);
                }
 +      } else
 +              return 0;
  
 -              if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
 -                      pipelined = NULL;
 -
 -              old->fence_reg = I915_FENCE_REG_NONE;
 -              old->last_fenced_ring = pipelined;
 -              old->last_fenced_seqno =
 -                      pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
 -
 -              drm_gem_object_unreference(&old->base);
 -      } else if (obj->last_fenced_seqno == 0)
 -              pipelined = NULL;
 -
 -      reg->obj = obj;
 -      list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 -      obj->fence_reg = reg - dev_priv->fence_regs;
 -      obj->last_fenced_ring = pipelined;
 -
 -      reg->setup_seqno =
 -              pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
 -      obj->last_fenced_seqno = reg->setup_seqno;
 -
 -update:
 +      i915_gem_object_update_fence(obj, reg, enable);
        obj->tiling_changed = false;
 -      switch (INTEL_INFO(dev)->gen) {
 -      case 7:
 -      case 6:
 -              ret = sandybridge_write_fence_reg(obj, pipelined);
 -              break;
 -      case 5:
 -      case 4:
 -              ret = i965_write_fence_reg(obj, pipelined);
 -              break;
 -      case 3:
 -              ret = i915_write_fence_reg(obj, pipelined);
 -              break;
 -      case 2:
 -              ret = i830_write_fence_reg(obj, pipelined);
 -              break;
 -      }
 -
 -      return ret;
 -}
 -
 -/**
 - * i915_gem_clear_fence_reg - clear out fence register info
 - * @obj: object to clear
 - *
 - * Zeroes out the fence register itself and clears out the associated
 - * data structures in dev_priv and obj.
 - */
 -static void
 -i915_gem_clear_fence_reg(struct drm_device *dev,
 -                       struct drm_i915_fence_reg *reg)
 -{
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      uint32_t fence_reg = reg - dev_priv->fence_regs;
 -
 -      switch (INTEL_INFO(dev)->gen) {
 -      case 7:
 -      case 6:
 -              I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
 -              break;
 -      case 5:
 -      case 4:
 -              I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
 -              break;
 -      case 3:
 -              if (fence_reg >= 8)
 -                      fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
 -              else
 -      case 2:
 -                      fence_reg = FENCE_REG_830_0 + fence_reg * 4;
 -
 -              I915_WRITE(fence_reg, 0);
 -              break;
 -      }
  
 -      list_del_init(&reg->lru_list);
 -      reg->obj = NULL;
 -      reg->setup_seqno = 0;
 -      reg->pin_count = 0;
 +      return 0;
  }
  
  /**
@@@ -2602,7 -2749,7 +2600,7 @@@ i915_gem_object_bind_to_gtt(struct drm_
                return ret;
        }
  
 -      ret = i915_gem_gtt_bind_object(obj);
 +      ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
                i915_gem_object_put_pages_gtt(obj);
                drm_mm_put_block(obj->gtt_space);
                goto search_free;
        }
  
 +      if (!dev_priv->mm.aliasing_ppgtt)
 +              i915_gem_gtt_bind_object(obj, obj->cache_level);
 +
        list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
  
@@@ -2809,8 -2953,7 +2807,8 @@@ int i915_gem_object_set_cache_level(str
                                return ret;
                }
  
 -              i915_gem_gtt_rebind_object(obj, cache_level);
 +              if (obj->has_global_gtt_mapping)
 +                      i915_gem_gtt_bind_object(obj, cache_level);
                if (obj->has_aliasing_ppgtt_mapping)
                        i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                                               obj, cache_level);
   * Prepare buffer for display plane (scanout, cursors, etc).
   * Can be called from an uninterruptible phase (modesetting) and allows
   * any flushes to be pipelined (for pageflips).
 - *
 - * For the display plane, we want to be in the GTT but out of any write
 - * domains. So in many ways this looks like set_to_gtt_domain() apart from the
 - * ability to pipeline the waits, pinning and any additional subtleties
 - * that may differentiate the display plane from ordinary buffers.
   */
  int
  i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                return ret;
  
        if (pipelined != obj->ring) {
 -              ret = i915_gem_object_wait_rendering(obj);
 -              if (ret == -ERESTARTSYS)
 +              ret = i915_gem_object_sync(obj, pipelined);
 +              if (ret)
                        return ret;
        }
  
@@@ -2934,7 -3082,7 +2932,7 @@@ i915_gem_object_finish_gpu(struct drm_i
   * This function returns when the move is complete, including waiting on
   * flushes to occur.
   */
 -static int
 +int
  i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
  {
        uint32_t old_write_domain, old_read_domains;
        if (ret)
                return ret;
  
 -      ret = i915_gem_object_wait_rendering(obj);
 -      if (ret)
 -              return ret;
 +      if (write || obj->pending_gpu_write) {
 +              ret = i915_gem_object_wait_rendering(obj);
 +              if (ret)
 +                      return ret;
 +      }
  
        i915_gem_object_flush_gtt_write_domain(obj);
  
 -      /* If we have a partially-valid cache of the object in the CPU,
 -       * finish invalidating it and free the per-page flags.
 -       */
 -      i915_gem_object_set_to_full_cpu_read_domain(obj);
 -
        old_write_domain = obj->base.write_domain;
        old_read_domains = obj->base.read_domains;
  
        return 0;
  }
  
 -/**
 - * Moves the object from a partially CPU read to a full one.
 - *
 - * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 - * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 - */
 -static void
 -i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
 -{
 -      if (!obj->page_cpu_valid)
 -              return;
 -
 -      /* If we're partially in the CPU read domain, finish moving it in.
 -       */
 -      if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
 -              int i;
 -
 -              for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
 -                      if (obj->page_cpu_valid[i])
 -                              continue;
 -                      drm_clflush_pages(obj->pages + i, 1);
 -              }
 -      }
 -
 -      /* Free the page_cpu_valid mappings which are now stale, whether
 -       * or not we've got I915_GEM_DOMAIN_CPU.
 -       */
 -      kfree(obj->page_cpu_valid);
 -      obj->page_cpu_valid = NULL;
 -}
 -
 -/**
 - * Set the CPU read domain on a range of the object.
 - *
 - * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 - * not entirely valid.  The page_cpu_valid member of the object flags which
 - * pages have been flushed, and will be respected by
 - * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 - * of the whole object.
 - *
 - * This function returns when the move is complete, including waiting on
 - * flushes to occur.
 - */
 -static int
 -i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 -                                        uint64_t offset, uint64_t size)
 -{
 -      uint32_t old_read_domains;
 -      int i, ret;
 -
 -      if (offset == 0 && size == obj->base.size)
 -              return i915_gem_object_set_to_cpu_domain(obj, 0);
 -
 -      ret = i915_gem_object_flush_gpu_write_domain(obj);
 -      if (ret)
 -              return ret;
 -
 -      ret = i915_gem_object_wait_rendering(obj);
 -      if (ret)
 -              return ret;
 -
 -      i915_gem_object_flush_gtt_write_domain(obj);
 -
 -      /* If we're already fully in the CPU read domain, we're done. */
 -      if (obj->page_cpu_valid == NULL &&
 -          (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
 -              return 0;
 -
 -      /* Otherwise, create/clear the per-page CPU read domain flag if we're
 -       * newly adding I915_GEM_DOMAIN_CPU
 -       */
 -      if (obj->page_cpu_valid == NULL) {
 -              obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
 -                                            GFP_KERNEL);
 -              if (obj->page_cpu_valid == NULL)
 -                      return -ENOMEM;
 -      } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
 -              memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
 -
 -      /* Flush the cache on any pages that are still invalid from the CPU's
 -       * perspective.
 -       */
 -      for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
 -           i++) {
 -              if (obj->page_cpu_valid[i])
 -                      continue;
 -
 -              drm_clflush_pages(obj->pages + i, 1);
 -
 -              obj->page_cpu_valid[i] = 1;
 -      }
 -
 -      /* It should now be out of any other write domains, and we can update
 -       * the domain values for our changes.
 -       */
 -      BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 -
 -      old_read_domains = obj->base.read_domains;
 -      obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
 -
 -      trace_i915_gem_object_change_domain(obj,
 -                                          old_read_domains,
 -                                          obj->base.write_domain);
 -
 -      return 0;
 -}
 -
  /* Throttle our rendering by waiting until the ring has completed our requests
   * emitted over 20 msec ago.
   *
@@@ -3085,9 -3343,6 +3083,9 @@@ i915_gem_object_pin(struct drm_i915_gem
                        return ret;
        }
  
 +      if (!obj->has_global_gtt_mapping && map_and_fenceable)
 +              i915_gem_gtt_bind_object(obj, obj->cache_level);
 +
        if (obj->pin_count++ == 0) {
                if (!obj->active)
                        list_move_tail(&obj->mm_list,
@@@ -3409,6 -3664,7 +3407,6 @@@ static void i915_gem_free_object_tail(s
        drm_gem_object_release(&obj->base);
        i915_gem_info_remove_obj(dev_priv, obj->base.size);
  
 -      kfree(obj->page_cpu_valid);
        kfree(obj->bit_17);
        kfree(obj);
  }
@@@ -3531,15 -3787,7 +3529,15 @@@ void i915_gem_init_ppgtt(struct drm_dev
        pd_offset <<= 16;
  
        if (INTEL_INFO(dev)->gen == 6) {
 -              uint32_t ecochk = I915_READ(GAM_ECOCHK);
 +              uint32_t ecochk, gab_ctl, ecobits;
 +
 +              ecobits = I915_READ(GAC_ECO_BITS); 
 +              I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
 +
 +              gab_ctl = I915_READ(GAB_CTL);
 +              I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
 +
 +              ecochk = I915_READ(GAM_ECOCHK);
                I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
                                       ECOCHK_PPGTT_CACHE64B);
                I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
@@@ -3730,7 -3978,9 +3728,7 @@@ i915_gem_load(struct drm_device *dev
                dev_priv->num_fence_regs = 8;
  
        /* Initialize fence registers to zero */
 -      for (i = 0; i < dev_priv->num_fence_regs; i++) {
 -              i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
 -      }
 +      i915_gem_reset_fences(dev);
  
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
index 68ec0130a6269c72124f18410620a285750e201c,de431942ded4bb5a7b6f5a380e6009cd6b22b696..c77bfa9ad34019cfcaf2c68b0f731c9d3f9210b9
@@@ -266,12 -266,6 +266,12 @@@ eb_destroy(struct eb_objects *eb
        kfree(eb);
  }
  
 +static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 +{
 +      return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 +              obj->cache_level != I915_CACHE_NONE);
 +}
 +
  static int
  i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_objects *eb,
  {
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
 +      struct drm_i915_gem_object *target_i915_obj;
        uint32_t target_offset;
        int ret = -EINVAL;
  
        if (unlikely(target_obj == NULL))
                return -ENOENT;
  
 -      target_offset = to_intel_bo(target_obj)->gtt_offset;
 +      target_i915_obj = to_intel_bo(target_obj);
 +      target_offset = target_i915_obj->gtt_offset;
  
        /* The target buffer should have appeared before us in the
         * exec_object list, so it should have a GTT space bound by now.
                return ret;
        }
  
 +      /* We can't wait for rendering with pagefaults disabled */
 +      if (obj->active && in_atomic())
 +              return -EFAULT;
 +
        reloc->delta += target_offset;
 -      if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
 +      if (use_cpu_reloc(obj)) {
                uint32_t page_offset = reloc->offset & ~PAGE_MASK;
                char *vaddr;
  
 +              ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 +              if (ret)
 +                      return ret;
 +
                vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
                *(uint32_t *)(vaddr + page_offset) = reloc->delta;
                kunmap_atomic(vaddr);
                uint32_t __iomem *reloc_entry;
                void __iomem *reloc_page;
  
 -              /* We can't wait for rendering with pagefaults disabled */
 -              if (obj->active && in_atomic())
 -                      return -EFAULT;
 +              ret = i915_gem_object_set_to_gtt_domain(obj, true);
 +              if (ret)
 +                      return ret;
  
 -              ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 +              ret = i915_gem_object_put_fence(obj);
                if (ret)
                        return ret;
  
                io_mapping_unmap_atomic(reloc_page);
        }
  
 +      /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 +       * pipe_control writes because the gpu doesn't properly redirect them
 +       * through the ppgtt for non_secure batchbuffers. */
 +      if (unlikely(IS_GEN6(dev) &&
 +          reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
 +          !target_i915_obj->has_global_gtt_mapping)) {
 +              i915_gem_gtt_bind_object(target_i915_obj,
 +                                       target_i915_obj->cache_level);
 +      }
 +
        /* and update the user's relocation entry */
        reloc->presumed_offset = target_offset;
  
@@@ -419,46 -393,30 +419,46 @@@ static in
  i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
                                    struct eb_objects *eb)
  {
 +#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 +      struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
        struct drm_i915_gem_relocation_entry __user *user_relocs;
        struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 -      int i, ret;
 +      int remain, ret;
  
        user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
 -      for (i = 0; i < entry->relocation_count; i++) {
 -              struct drm_i915_gem_relocation_entry reloc;
  
 -              if (__copy_from_user_inatomic(&reloc,
 -                                            user_relocs+i,
 -                                            sizeof(reloc)))
 +      remain = entry->relocation_count;
 +      while (remain) {
 +              struct drm_i915_gem_relocation_entry *r = stack_reloc;
 +              int count = remain;
 +              if (count > ARRAY_SIZE(stack_reloc))
 +                      count = ARRAY_SIZE(stack_reloc);
 +              remain -= count;
 +
 +              if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
                        return -EFAULT;
  
 -              ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
 -              if (ret)
 -                      return ret;
 +              do {
 +                      u64 offset = r->presumed_offset;
  
 -              if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
 -                                          &reloc.presumed_offset,
 -                                          sizeof(reloc.presumed_offset)))
 -                      return -EFAULT;
 +                      ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
 +                      if (ret)
 +                              return ret;
 +
 +                      if (r->presumed_offset != offset &&
 +                          __copy_to_user_inatomic(&user_relocs->presumed_offset,
 +                                                  &r->presumed_offset,
 +                                                  sizeof(r->presumed_offset))) {
 +                              return -EFAULT;
 +                      }
 +
 +                      user_relocs++;
 +                      r++;
 +              } while (--count);
        }
  
        return 0;
 +#undef N_RELOC
  }
  
  static int
@@@ -506,13 -464,6 +506,13 @@@ i915_gem_execbuffer_relocate(struct drm
  
  #define  __EXEC_OBJECT_HAS_FENCE (1<<31)
  
 +static int
 +need_reloc_mappable(struct drm_i915_gem_object *obj)
 +{
 +      struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 +      return entry->relocation_count && !use_cpu_reloc(obj);
 +}
 +
  static int
  pin_and_fence_object(struct drm_i915_gem_object *obj,
                     struct intel_ring_buffer *ring)
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
 -      need_mappable =
 -              entry->relocation_count ? true : need_fence;
 +      need_mappable = need_fence || need_reloc_mappable(obj);
  
        ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
        if (ret)
  
        if (has_fenced_gpu_access) {
                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 -                      if (obj->tiling_mode) {
 -                              ret = i915_gem_object_get_fence(obj, ring);
 -                              if (ret)
 -                                      goto err_unpin;
 +                      ret = i915_gem_object_get_fence(obj);
 +                      if (ret)
 +                              goto err_unpin;
  
 +                      if (i915_gem_object_pin_fence(obj))
                                entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 -                              i915_gem_object_pin_fence(obj);
 -                      } else {
 -                              ret = i915_gem_object_put_fence(obj);
 -                              if (ret)
 -                                      goto err_unpin;
 -                      }
 +
                        obj->pending_fenced_gpu_access = true;
                }
        }
@@@ -578,7 -535,8 +578,7 @@@ i915_gem_execbuffer_reserve(struct inte
                        has_fenced_gpu_access &&
                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                        obj->tiling_mode != I915_TILING_NONE;
 -              need_mappable =
 -                      entry->relocation_count ? true : need_fence;
 +              need_mappable = need_fence || need_reloc_mappable(obj);
  
                if (need_mappable)
                        list_move(&obj->exec_list, &ordered_objects);
                                has_fenced_gpu_access &&
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;
 -                      need_mappable =
 -                              entry->relocation_count ? true : need_fence;
 +                      need_mappable = need_fence || need_reloc_mappable(obj);
  
                        if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
                            (need_mappable && !obj->map_and_fenceable))
@@@ -839,6 -798,64 +839,6 @@@ i915_gem_execbuffer_flush(struct drm_de
        return 0;
  }
  
 -static bool
 -intel_enable_semaphores(struct drm_device *dev)
 -{
 -      if (INTEL_INFO(dev)->gen < 6)
 -              return 0;
 -
 -      if (i915_semaphores >= 0)
 -              return i915_semaphores;
 -
 -      /* Disable semaphores on SNB */
 -      if (INTEL_INFO(dev)->gen == 6)
 -              return 0;
 -
 -      return 1;
 -}
 -
 -static int
 -i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 -                             struct intel_ring_buffer *to)
 -{
 -      struct intel_ring_buffer *from = obj->ring;
 -      u32 seqno;
 -      int ret, idx;
 -
 -      if (from == NULL || to == from)
 -              return 0;
 -
 -      /* XXX gpu semaphores are implicated in various hard hangs on SNB */
 -      if (!intel_enable_semaphores(obj->base.dev))
 -              return i915_gem_object_wait_rendering(obj);
 -
 -      idx = intel_ring_sync_index(from, to);
 -
 -      seqno = obj->last_rendering_seqno;
 -      if (seqno <= from->sync_seqno[idx])
 -              return 0;
 -
 -      if (seqno == from->outstanding_lazy_request) {
 -              struct drm_i915_gem_request *request;
 -
 -              request = kzalloc(sizeof(*request), GFP_KERNEL);
 -              if (request == NULL)
 -                      return -ENOMEM;
 -
 -              ret = i915_add_request(from, NULL, request);
 -              if (ret) {
 -                      kfree(request);
 -                      return ret;
 -              }
 -
 -              seqno = request->seqno;
 -      }
 -
 -      from->sync_seqno[idx] = seqno;
 -
 -      return to->sync_to(to, from, seqno - 1);
 -}
 -
  static int
  i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
  {
@@@ -900,7 -917,7 +900,7 @@@ i915_gem_execbuffer_move_to_gpu(struct 
        }
  
        list_for_each_entry(obj, objects, exec_list) {
 -              ret = i915_gem_execbuffer_sync_rings(obj, ring);
 +              ret = i915_gem_object_sync(obj, ring);
                if (ret)
                        return ret;
        }
@@@ -938,7 -955,7 +938,7 @@@ validate_exec_list(struct drm_i915_gem_
                if (!access_ok(VERIFY_WRITE, ptr, length))
                        return -EFAULT;
  
 -              if (fault_in_pages_readable(ptr, length))
 +              if (fault_in_multipages_readable(ptr, length))
                        return -EFAULT;
        }
  
@@@ -1116,6 -1133,11 +1116,11 @@@ i915_gem_do_execbuffer(struct drm_devic
                        return -EINVAL;
                }
  
+               if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+                       DRM_DEBUG("execbuf with %u cliprects\n",
+                                 args->num_cliprects);
+                       return -EINVAL;
+               }
                cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL) {
@@@ -1387,7 -1409,8 +1392,8 @@@ i915_gem_execbuffer2(struct drm_device 
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;
  
-       if (args->buffer_count < 1) {
+       if (args->buffer_count < 1 ||
+           args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
                DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }
index 0976137ab79ab5111df61e3fbab99deb8c8bea05,90b9793fd5da3bbe0e209c3778187199a0dca665..417ca99e697d8c842d150d9252157e2503f4a229
@@@ -55,36 -55,18 +55,36 @@@ static struct intel_crt *intel_attached
                            struct intel_crt, base);
  }
  
 -static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
 +static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
  {
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 temp, reg;
 +      u32 temp;
  
 -      if (HAS_PCH_SPLIT(dev))
 -              reg = PCH_ADPA;
 -      else
 -              reg = ADPA;
 +      temp = I915_READ(PCH_ADPA);
 +      temp &= ~ADPA_DAC_ENABLE;
 +
 +      switch (mode) {
 +      case DRM_MODE_DPMS_ON:
 +              temp |= ADPA_DAC_ENABLE;
 +              break;
 +      case DRM_MODE_DPMS_STANDBY:
 +      case DRM_MODE_DPMS_SUSPEND:
 +      case DRM_MODE_DPMS_OFF:
 +              /* Just leave port enable cleared */
 +              break;
 +      }
 +
 +      I915_WRITE(PCH_ADPA, temp);
 +}
  
 -      temp = I915_READ(reg);
 +static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
 +{
 +      struct drm_device *dev = encoder->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      u32 temp;
 +
 +      temp = I915_READ(ADPA);
        temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
        temp &= ~ADPA_DAC_ENABLE;
  
                break;
        }
  
 -      I915_WRITE(reg, temp);
 +      I915_WRITE(ADPA, temp);
  }
  
  static int intel_crt_mode_valid(struct drm_connector *connector,
@@@ -296,10 -278,9 +296,10 @@@ static bool intel_crt_detect_ddc(struc
        if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
                struct edid *edid;
                bool is_digital = false;
 +              struct i2c_adapter *i2c;
  
 -              edid = drm_get_edid(connector,
 -                      &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
 +              i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
 +              edid = drm_get_edid(connector, i2c);
                /*
                 * This may be a DVI-I connector with a shared DDC
                 * link between analog and digital outputs, so we
@@@ -449,8 -430,8 +449,8 @@@ intel_crt_detect(struct drm_connector *
  {
        struct drm_device *dev = connector->dev;
        struct intel_crt *crt = intel_attached_crt(connector);
-       struct drm_crtc *crtc;
        enum drm_connector_status status;
+       struct intel_load_detect_pipe tmp;
  
        if (I915_HAS_HOTPLUG(dev)) {
                if (intel_crt_detect_hotplug(connector)) {
                return connector->status;
  
        /* for pre-945g platforms use load detect */
-       crtc = crt->base.base.crtc;
-       if (crtc && crtc->enabled) {
-               status = intel_crt_load_detect(crt);
-       } else {
-               struct intel_load_detect_pipe tmp;
-               if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
-                                              &tmp)) {
-                       if (intel_crt_detect_ddc(connector))
-                               status = connector_status_connected;
-                       else
-                               status = intel_crt_load_detect(crt);
-                       intel_release_load_detect_pipe(&crt->base, connector,
-                                                      &tmp);
-               } else
-                       status = connector_status_unknown;
-       }
+       if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
+                                      &tmp)) {
+               if (intel_crt_detect_ddc(connector))
+                       status = connector_status_connected;
+               else
+                       status = intel_crt_load_detect(crt);
+               intel_release_load_detect_pipe(&crt->base, connector,
+                                              &tmp);
+       } else
+               status = connector_status_unknown;
  
        return status;
  }
@@@ -502,16 -476,15 +495,16 @@@ static int intel_crt_get_modes(struct d
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 +      struct i2c_adapter *i2c;
  
 -      ret = intel_ddc_get_modes(connector,
 -                               &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
 +      i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
 +      ret = intel_ddc_get_modes(connector, i2c);
        if (ret || !IS_G4X(dev))
                return ret;
  
        /* Try to probe digital port for output in DVI-I -> VGA mode. */
 -      return intel_ddc_get_modes(connector,
 -                                 &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
 +      i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
 +      return intel_ddc_get_modes(connector, i2c);
  }
  
  static int intel_crt_set_property(struct drm_connector *connector,
@@@ -534,20 -507,12 +527,20 @@@ static void intel_crt_reset(struct drm_
   * Routines for controlling stuff on the analog port
   */
  
 -static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
 -      .dpms = intel_crt_dpms,
 +static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
 +      .mode_fixup = intel_crt_mode_fixup,
 +      .prepare = intel_encoder_prepare,
 +      .commit = intel_encoder_commit,
 +      .mode_set = intel_crt_mode_set,
 +      .dpms = pch_crt_dpms,
 +};
 +
 +static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
        .mode_fixup = intel_crt_mode_fixup,
        .prepare = intel_encoder_prepare,
        .commit = intel_encoder_commit,
        .mode_set = intel_crt_mode_set,
 +      .dpms = gmch_crt_dpms,
  };
  
  static const struct drm_connector_funcs intel_crt_connector_funcs = {
@@@ -571,7 -536,7 +564,7 @@@ static const struct drm_encoder_funcs i
  
  static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
  {
 -      DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
 +      DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
        return 1;
  }
  
@@@ -593,7 -558,6 +586,7 @@@ void intel_crt_init(struct drm_device *
        struct intel_crt *crt;
        struct intel_connector *intel_connector;
        struct drm_i915_private *dev_priv = dev->dev_private;
 +      const struct drm_encoder_helper_funcs *encoder_helper_funcs;
  
        /* Skip machines without VGA that falsely report hotplug events */
        if (dmi_check_system(intel_no_crt))
                connector->interlace_allowed = 1;
        connector->doublescan_allowed = 0;
  
 -      drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
 +      if (HAS_PCH_SPLIT(dev))
 +              encoder_helper_funcs = &pch_encoder_funcs;
 +      else
 +              encoder_helper_funcs = &gmch_encoder_funcs;
 +
 +      drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
        drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
  
        drm_sysfs_connector_add(connector);
index 4c844c68ec809620f15f612c8c351944cc5d5870,1b1cf3b3ff515c8612cf69c42837824ab57bc7d9..8c239f2d6bcd3dd0035aab62098257f84ca5d2ca
@@@ -24,7 -24,7 +24,7 @@@
   *    Eric Anholt <eric@anholt.net>
   */
  
 -#include <linux/cpufreq.h>
 +#include <linux/dmi.h>
  #include <linux/module.h>
  #include <linux/input.h>
  #include <linux/i2c.h>
@@@ -44,6 -44,7 +44,6 @@@
  #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
  
  bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 -static void intel_update_watermarks(struct drm_device *dev);
  static void intel_increase_pllclock(struct drm_crtc *crtc);
  static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
  
@@@ -359,110 -360,6 +359,110 @@@ static const intel_limit_t intel_limits
        .find_pll = intel_find_pll_ironlake_dp,
  };
  
 +u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
 +{
 +      unsigned long flags;
 +      u32 val = 0;
 +
 +      spin_lock_irqsave(&dev_priv->dpio_lock, flags);
 +      if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
 +              DRM_ERROR("DPIO idle wait timed out\n");
 +              goto out_unlock;
 +      }
 +
 +      I915_WRITE(DPIO_REG, reg);
 +      I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
 +                 DPIO_BYTE);
 +      if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
 +              DRM_ERROR("DPIO read wait timed out\n");
 +              goto out_unlock;
 +      }
 +      val = I915_READ(DPIO_DATA);
 +
 +out_unlock:
 +      spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
 +      return val;
 +}
 +
 +static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
 +                           u32 val)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&dev_priv->dpio_lock, flags);
 +      if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
 +              DRM_ERROR("DPIO idle wait timed out\n");
 +              goto out_unlock;
 +      }
 +
 +      I915_WRITE(DPIO_DATA, val);
 +      I915_WRITE(DPIO_REG, reg);
 +      I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
 +                 DPIO_BYTE);
 +      if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
 +              DRM_ERROR("DPIO write wait timed out\n");
 +
 +out_unlock:
 +      spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
 +}
 +
 +static void vlv_init_dpio(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      /* Reset the DPIO config */
 +      I915_WRITE(DPIO_CTL, 0);
 +      POSTING_READ(DPIO_CTL);
 +      I915_WRITE(DPIO_CTL, 1);
 +      POSTING_READ(DPIO_CTL);
 +}
 +
 +static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
 +{
 +      DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
 +      return 1;
 +}
 +
 +static const struct dmi_system_id intel_dual_link_lvds[] = {
 +      {
 +              .callback = intel_dual_link_lvds_callback,
 +              .ident = "Apple MacBook Pro (Core i5/i7 Series)",
 +              .matches = {
 +                      DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
 +                      DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
 +              },
 +      },
 +      { }     /* terminating entry */
 +};
 +
 +static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
 +                            unsigned int reg)
 +{
 +      unsigned int val;
 +
 +      /* use the module option value if specified */
 +      if (i915_lvds_channel_mode > 0)
 +              return i915_lvds_channel_mode == 2;
 +
 +      if (dmi_check_system(intel_dual_link_lvds))
 +              return true;
 +
 +      if (dev_priv->lvds_val)
 +              val = dev_priv->lvds_val;
 +      else {
 +              /* BIOS should set the proper LVDS register value at boot, but
 +               * in reality, it doesn't set the value when the lid is closed;
 +               * we need to check "the value to be set" in VBT when LVDS
 +               * register is uninitialized.
 +               */
 +              val = I915_READ(reg);
 +              if (!(val & ~LVDS_DETECTED))
 +                      val = dev_priv->bios_lvds_val;
 +              dev_priv->lvds_val = val;
 +      }
 +      return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
 +}
 +
  static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
                                                int refclk)
  {
        const intel_limit_t *limit;
  
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 -              if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
 -                  LVDS_CLKB_POWER_UP) {
 +              if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
                        /* LVDS dual channel */
                        if (refclk == 100000)
                                limit = &intel_limits_ironlake_dual_lvds_100m;
@@@ -499,7 -397,8 +499,7 @@@ static const intel_limit_t *intel_g4x_l
        const intel_limit_t *limit;
  
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 -              if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
 -                  LVDS_CLKB_POWER_UP)
 +              if (is_dual_link_lvds(dev_priv, LVDS))
                        /* LVDS with dual channel */
                        limit = &intel_limits_g4x_dual_channel_lvds;
                else
@@@ -637,7 -536,8 +637,7 @@@ intel_find_best_PLL(const intel_limit_
                 * reliably set up different single/dual channel state, if we
                 * even can.
                 */
 -              if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
 -                  LVDS_CLKB_POWER_UP)
 +              if (is_dual_link_lvds(dev_priv, LVDS))
                        clock.p2 = limit->p2.p2_fast;
                else
                        clock.p2 = limit->p2.p2_slow;
@@@ -1515,7 -1415,7 +1515,7 @@@ static void intel_disable_pipe(struct d
   * Plane regs are double buffered, going from enabled->disabled needs a
   * trigger in order to latch.  The display address reg provides this.
   */
 -static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 +void intel_flush_display_plane(struct drm_i915_private *dev_priv,
                                      enum plane plane)
  {
        I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
@@@ -1626,6 -1526,490 +1626,6 @@@ static void intel_disable_pch_ports(str
        disable_pch_hdmi(dev_priv, pipe, HDMID);
  }
  
 -static void i8xx_disable_fbc(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 fbc_ctl;
 -
 -      /* Disable compression */
 -      fbc_ctl = I915_READ(FBC_CONTROL);
 -      if ((fbc_ctl & FBC_CTL_EN) == 0)
 -              return;
 -
 -      fbc_ctl &= ~FBC_CTL_EN;
 -      I915_WRITE(FBC_CONTROL, fbc_ctl);
 -
 -      /* Wait for compressing bit to clear */
 -      if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
 -              DRM_DEBUG_KMS("FBC idle timed out\n");
 -              return;
 -      }
 -
 -      DRM_DEBUG_KMS("disabled FBC\n");
 -}
 -
 -static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_framebuffer *fb = crtc->fb;
 -      struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 -      struct drm_i915_gem_object *obj = intel_fb->obj;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int cfb_pitch;
 -      int plane, i;
 -      u32 fbc_ctl, fbc_ctl2;
 -
 -      cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
 -      if (fb->pitches[0] < cfb_pitch)
 -              cfb_pitch = fb->pitches[0];
 -
 -      /* FBC_CTL wants 64B units */
 -      cfb_pitch = (cfb_pitch / 64) - 1;
 -      plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 -
 -      /* Clear old tags */
 -      for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
 -              I915_WRITE(FBC_TAG + (i * 4), 0);
 -
 -      /* Set it up... */
 -      fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
 -      fbc_ctl2 |= plane;
 -      I915_WRITE(FBC_CONTROL2, fbc_ctl2);
 -      I915_WRITE(FBC_FENCE_OFF, crtc->y);
 -
 -      /* enable it... */
 -      fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
 -      if (IS_I945GM(dev))
 -              fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 -      fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 -      fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
 -      fbc_ctl |= obj->fence_reg;
 -      I915_WRITE(FBC_CONTROL, fbc_ctl);
 -
 -      DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
 -                    cfb_pitch, crtc->y, intel_crtc->plane);
 -}
 -
 -static bool i8xx_fbc_enabled(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 -}
 -
 -static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_framebuffer *fb = crtc->fb;
 -      struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 -      struct drm_i915_gem_object *obj = intel_fb->obj;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
 -      unsigned long stall_watermark = 200;
 -      u32 dpfc_ctl;
 -
 -      dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
 -      dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
 -      I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
 -
 -      I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
 -                 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
 -                 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
 -      I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
 -
 -      /* enable it... */
 -      I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
 -
 -      DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
 -}
 -
 -static void g4x_disable_fbc(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 dpfc_ctl;
 -
 -      /* Disable compression */
 -      dpfc_ctl = I915_READ(DPFC_CONTROL);
 -      if (dpfc_ctl & DPFC_CTL_EN) {
 -              dpfc_ctl &= ~DPFC_CTL_EN;
 -              I915_WRITE(DPFC_CONTROL, dpfc_ctl);
 -
 -              DRM_DEBUG_KMS("disabled FBC\n");
 -      }
 -}
 -
 -static bool g4x_fbc_enabled(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 -}
 -
 -static void sandybridge_blit_fbc_update(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 blt_ecoskpd;
 -
 -      /* Make sure blitter notifies FBC of writes */
 -      gen6_gt_force_wake_get(dev_priv);
 -      blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
 -      blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
 -              GEN6_BLITTER_LOCK_SHIFT;
 -      I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 -      blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
 -      I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 -      blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
 -                       GEN6_BLITTER_LOCK_SHIFT);
 -      I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 -      POSTING_READ(GEN6_BLITTER_ECOSKPD);
 -      gen6_gt_force_wake_put(dev_priv);
 -}
 -
 -static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_framebuffer *fb = crtc->fb;
 -      struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 -      struct drm_i915_gem_object *obj = intel_fb->obj;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
 -      unsigned long stall_watermark = 200;
 -      u32 dpfc_ctl;
 -
 -      dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 -      dpfc_ctl &= DPFC_RESERVED;
 -      dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
 -      /* Set persistent mode for front-buffer rendering, ala X. */
 -      dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
 -      dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
 -      I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 -
 -      I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
 -                 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
 -                 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
 -      I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
 -      I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
 -      /* enable it... */
 -      I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 -
 -      if (IS_GEN6(dev)) {
 -              I915_WRITE(SNB_DPFC_CTL_SA,
 -                         SNB_CPU_FENCE_ENABLE | obj->fence_reg);
 -              I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
 -              sandybridge_blit_fbc_update(dev);
 -      }
 -
 -      DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
 -}
 -
 -static void ironlake_disable_fbc(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 dpfc_ctl;
 -
 -      /* Disable compression */
 -      dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 -      if (dpfc_ctl & DPFC_CTL_EN) {
 -              dpfc_ctl &= ~DPFC_CTL_EN;
 -              I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 -
 -              DRM_DEBUG_KMS("disabled FBC\n");
 -      }
 -}
 -
 -static bool ironlake_fbc_enabled(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 -}
 -
 -bool intel_fbc_enabled(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      if (!dev_priv->display.fbc_enabled)
 -              return false;
 -
 -      return dev_priv->display.fbc_enabled(dev);
 -}
 -
 -static void intel_fbc_work_fn(struct work_struct *__work)
 -{
 -      struct intel_fbc_work *work =
 -              container_of(to_delayed_work(__work),
 -                           struct intel_fbc_work, work);
 -      struct drm_device *dev = work->crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      mutex_lock(&dev->struct_mutex);
 -      if (work == dev_priv->fbc_work) {
 -              /* Double check that we haven't switched fb without cancelling
 -               * the prior work.
 -               */
 -              if (work->crtc->fb == work->fb) {
 -                      dev_priv->display.enable_fbc(work->crtc,
 -                                                   work->interval);
 -
 -                      dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
 -                      dev_priv->cfb_fb = work->crtc->fb->base.id;
 -                      dev_priv->cfb_y = work->crtc->y;
 -              }
 -
 -              dev_priv->fbc_work = NULL;
 -      }
 -      mutex_unlock(&dev->struct_mutex);
 -
 -      kfree(work);
 -}
 -
 -static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
 -{
 -      if (dev_priv->fbc_work == NULL)
 -              return;
 -
 -      DRM_DEBUG_KMS("cancelling pending FBC enable\n");
 -
 -      /* Synchronisation is provided by struct_mutex and checking of
 -       * dev_priv->fbc_work, so we can perform the cancellation
 -       * entirely asynchronously.
 -       */
 -      if (cancel_delayed_work(&dev_priv->fbc_work->work))
 -              /* tasklet was killed before being run, clean up */
 -              kfree(dev_priv->fbc_work);
 -
 -      /* Mark the work as no longer wanted so that if it does
 -       * wake-up (because the work was already running and waiting
 -       * for our mutex), it will discover that is no longer
 -       * necessary to run.
 -       */
 -      dev_priv->fbc_work = NULL;
 -}
 -
 -static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 -{
 -      struct intel_fbc_work *work;
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      if (!dev_priv->display.enable_fbc)
 -              return;
 -
 -      intel_cancel_fbc_work(dev_priv);
 -
 -      work = kzalloc(sizeof *work, GFP_KERNEL);
 -      if (work == NULL) {
 -              dev_priv->display.enable_fbc(crtc, interval);
 -              return;
 -      }
 -
 -      work->crtc = crtc;
 -      work->fb = crtc->fb;
 -      work->interval = interval;
 -      INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 -
 -      dev_priv->fbc_work = work;
 -
 -      DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
 -
 -      /* Delay the actual enabling to let pageflipping cease and the
 -       * display to settle before starting the compression. Note that
 -       * this delay also serves a second purpose: it allows for a
 -       * vblank to pass after disabling the FBC before we attempt
 -       * to modify the control registers.
 -       *
 -       * A more complicated solution would involve tracking vblanks
 -       * following the termination of the page-flipping sequence
 -       * and indeed performing the enable as a co-routine and not
 -       * waiting synchronously upon the vblank.
 -       */
 -      schedule_delayed_work(&work->work, msecs_to_jiffies(50));
 -}
 -
 -void intel_disable_fbc(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      intel_cancel_fbc_work(dev_priv);
 -
 -      if (!dev_priv->display.disable_fbc)
 -              return;
 -
 -      dev_priv->display.disable_fbc(dev);
 -      dev_priv->cfb_plane = -1;
 -}
 -
 -/**
 - * intel_update_fbc - enable/disable FBC as needed
 - * @dev: the drm_device
 - *
 - * Set up the framebuffer compression hardware at mode set time.  We
 - * enable it if possible:
 - *   - plane A only (on pre-965)
 - *   - no pixel mulitply/line duplication
 - *   - no alpha buffer discard
 - *   - no dual wide
 - *   - framebuffer <= 2048 in width, 1536 in height
 - *
 - * We can't assume that any compression will take place (worst case),
 - * so the compressed buffer has to be the same size as the uncompressed
 - * one.  It also must reside (along with the line length buffer) in
 - * stolen memory.
 - *
 - * We need to enable/disable FBC on a global basis.
 - */
 -static void intel_update_fbc(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_crtc *crtc = NULL, *tmp_crtc;
 -      struct intel_crtc *intel_crtc;
 -      struct drm_framebuffer *fb;
 -      struct intel_framebuffer *intel_fb;
 -      struct drm_i915_gem_object *obj;
 -      int enable_fbc;
 -
 -      DRM_DEBUG_KMS("\n");
 -
 -      if (!i915_powersave)
 -              return;
 -
 -      if (!I915_HAS_FBC(dev))
 -              return;
 -
 -      /*
 -       * If FBC is already on, we just have to verify that we can
 -       * keep it that way...
 -       * Need to disable if:
 -       *   - more than one pipe is active
 -       *   - changing FBC params (stride, fence, mode)
 -       *   - new fb is too large to fit in compressed buffer
 -       *   - going to an unsupported config (interlace, pixel multiply, etc.)
 -       */
 -      list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
 -              if (tmp_crtc->enabled && tmp_crtc->fb) {
 -                      if (crtc) {
 -                              DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
 -                              dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
 -                              goto out_disable;
 -                      }
 -                      crtc = tmp_crtc;
 -              }
 -      }
 -
 -      if (!crtc || crtc->fb == NULL) {
 -              DRM_DEBUG_KMS("no output, disabling\n");
 -              dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
 -              goto out_disable;
 -      }
 -
 -      intel_crtc = to_intel_crtc(crtc);
 -      fb = crtc->fb;
 -      intel_fb = to_intel_framebuffer(fb);
 -      obj = intel_fb->obj;
 -
 -      enable_fbc = i915_enable_fbc;
 -      if (enable_fbc < 0) {
 -              DRM_DEBUG_KMS("fbc set to per-chip default\n");
 -              enable_fbc = 1;
 -              if (INTEL_INFO(dev)->gen <= 6)
 -                      enable_fbc = 0;
 -      }
 -      if (!enable_fbc) {
 -              DRM_DEBUG_KMS("fbc disabled per module param\n");
 -              dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
 -              goto out_disable;
 -      }
 -      if (intel_fb->obj->base.size > dev_priv->cfb_size) {
 -              DRM_DEBUG_KMS("framebuffer too large, disabling "
 -                            "compression\n");
 -              dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
 -              goto out_disable;
 -      }
 -      if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
 -          (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
 -              DRM_DEBUG_KMS("mode incompatible with compression, "
 -                            "disabling\n");
 -              dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
 -              goto out_disable;
 -      }
 -      if ((crtc->mode.hdisplay > 2048) ||
 -          (crtc->mode.vdisplay > 1536)) {
 -              DRM_DEBUG_KMS("mode too large for compression, disabling\n");
 -              dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
 -              goto out_disable;
 -      }
 -      if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
 -              DRM_DEBUG_KMS("plane not 0, disabling compression\n");
 -              dev_priv->no_fbc_reason = FBC_BAD_PLANE;
 -              goto out_disable;
 -      }
 -
 -      /* The use of a CPU fence is mandatory in order to detect writes
 -       * by the CPU to the scanout and trigger updates to the FBC.
 -       */
 -      if (obj->tiling_mode != I915_TILING_X ||
 -          obj->fence_reg == I915_FENCE_REG_NONE) {
 -              DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
 -              dev_priv->no_fbc_reason = FBC_NOT_TILED;
 -              goto out_disable;
 -      }
 -
 -      /* If the kernel debugger is active, always disable compression */
 -      if (in_dbg_master())
 -              goto out_disable;
 -
 -      /* If the scanout has not changed, don't modify the FBC settings.
 -       * Note that we make the fundamental assumption that the fb->obj
 -       * cannot be unpinned (and have its GTT offset and fence revoked)
 -       * without first being decoupled from the scanout and FBC disabled.
 -       */
 -      if (dev_priv->cfb_plane == intel_crtc->plane &&
 -          dev_priv->cfb_fb == fb->base.id &&
 -          dev_priv->cfb_y == crtc->y)
 -              return;
 -
 -      if (intel_fbc_enabled(dev)) {
 -              /* We update FBC along two paths, after changing fb/crtc
 -               * configuration (modeswitching) and after page-flipping
 -               * finishes. For the latter, we know that not only did
 -               * we disable the FBC at the start of the page-flip
 -               * sequence, but also more than one vblank has passed.
 -               *
 -               * For the former case of modeswitching, it is possible
 -               * to switch between two FBC valid configurations
 -               * instantaneously so we do need to disable the FBC
 -               * before we can modify its control registers. We also
 -               * have to wait for the next vblank for that to take
 -               * effect. However, since we delay enabling FBC we can
 -               * assume that a vblank has passed since disabling and
 -               * that we can safely alter the registers in the deferred
 -               * callback.
 -               *
 -               * In the scenario that we go from a valid to invalid
 -               * and then back to valid FBC configuration we have
 -               * no strict enforcement that a vblank occurred since
 -               * disabling the FBC. However, along all current pipe
 -               * disabling paths we do need to wait for a vblank at
 -               * some point. And we wait before enabling FBC anyway.
 -               */
 -              DRM_DEBUG_KMS("disabling active FBC for update\n");
 -              intel_disable_fbc(dev);
 -      }
 -
 -      intel_enable_fbc(crtc, 500);
 -      return;
 -
 -out_disable:
 -      /* Multiple disables should be harmless */
 -      if (intel_fbc_enabled(dev)) {
 -              DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
 -              intel_disable_fbc(dev);
 -      }
 -}
 -
  int
  intel_pin_and_fence_fb_obj(struct drm_device *dev,
                           struct drm_i915_gem_object *obj,
         * framebuffer compression.  For simplicity, we always install
         * a fence as the cost is not that onerous.
         */
 -      if (obj->tiling_mode != I915_TILING_NONE) {
 -              ret = i915_gem_object_get_fence(obj, pipelined);
 -              if (ret)
 -                      goto err_unpin;
 +      ret = i915_gem_object_get_fence(obj);
 +      if (ret)
 +              goto err_unpin;
  
 -              i915_gem_object_pin_fence(obj);
 -      }
 +      i915_gem_object_pin_fence(obj);
  
        dev_priv->mm.interruptible = true;
        return 0;
@@@ -1751,7 -2137,7 +1751,7 @@@ static int i9xx_update_plane(struct drm
                      Start, Offset, x, y, fb->pitches[0]);
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_INFO(dev)->gen >= 4) {
 -              I915_WRITE(DSPSURF(plane), Start);
 +              I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE(DSPADDR(plane), Offset);
        } else
@@@ -1831,7 -2217,7 +1831,7 @@@ static int ironlake_update_plane(struc
        DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
                      Start, Offset, x, y, fb->pitches[0]);
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 -      I915_WRITE(DSPSURF(plane), Start);
 +      I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPADDR(plane), Offset);
        POSTING_READ(reg);
@@@ -1846,12 -2232,16 +1846,12 @@@ intel_pipe_set_base_atomic(struct drm_c
  {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      int ret;
 -
 -      ret = dev_priv->display.update_plane(crtc, fb, x, y);
 -      if (ret)
 -              return ret;
  
 -      intel_update_fbc(dev);
 +      if (dev_priv->display.disable_fbc)
 +              dev_priv->display.disable_fbc(dev);
        intel_increase_pllclock(crtc);
  
 -      return 0;
 +      return dev_priv->display.update_plane(crtc, fb, x, y);
  }
  
  static int
@@@ -1886,7 -2276,6 +1886,7 @@@ intel_pipe_set_base(struct drm_crtc *cr
                    struct drm_framebuffer *old_fb)
  {
        struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int ret;
        if (old_fb)
                intel_finish_fb(old_fb);
  
 -      ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
 -                                       LEAVE_ATOMIC_MODE_SET);
 +      ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
        if (ret) {
                intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
                mutex_unlock(&dev->struct_mutex);
                intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
        }
  
 +      intel_update_fbc(dev);
        mutex_unlock(&dev->struct_mutex);
  
        if (!dev->primary->master)
@@@ -2158,7 -2547,7 +2158,7 @@@ static void gen6_fdi_link_train(struct 
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
 -      u32 reg, temp, i;
 +      u32 reg, temp, i, retry;
  
        /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
                POSTING_READ(reg);
                udelay(500);
  
 -              reg = FDI_RX_IIR(pipe);
 -              temp = I915_READ(reg);
 -              DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 -
 -              if (temp & FDI_RX_BIT_LOCK) {
 -                      I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
 -                      DRM_DEBUG_KMS("FDI train 1 done.\n");
 -                      break;
 +              for (retry = 0; retry < 5; retry++) {
 +                      reg = FDI_RX_IIR(pipe);
 +                      temp = I915_READ(reg);
 +                      DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 +                      if (temp & FDI_RX_BIT_LOCK) {
 +                              I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
 +                              DRM_DEBUG_KMS("FDI train 1 done.\n");
 +                              break;
 +                      }
 +                      udelay(50);
                }
 +              if (retry < 5)
 +                      break;
        }
        if (i == 4)
                DRM_ERROR("FDI train 1 fail!\n");
                POSTING_READ(reg);
                udelay(500);
  
 -              reg = FDI_RX_IIR(pipe);
 -              temp = I915_READ(reg);
 -              DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 -
 -              if (temp & FDI_RX_SYMBOL_LOCK) {
 -                      I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
 -                      DRM_DEBUG_KMS("FDI train 2 done.\n");
 -                      break;
 +              for (retry = 0; retry < 5; retry++) {
 +                      reg = FDI_RX_IIR(pipe);
 +                      temp = I915_READ(reg);
 +                      DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 +                      if (temp & FDI_RX_SYMBOL_LOCK) {
 +                              I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
 +                              DRM_DEBUG_KMS("FDI train 2 done.\n");
 +                              break;
 +                      }
 +                      udelay(50);
                }
 +              if (retry < 5)
 +                      break;
        }
        if (i == 4)
                DRM_ERROR("FDI train 2 fail!\n");
@@@ -2529,14 -2910,16 +2529,14 @@@ static void intel_clear_scanline_wait(s
  
  static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
  {
 -      struct drm_i915_gem_object *obj;
 -      struct drm_i915_private *dev_priv;
 +      struct drm_device *dev = crtc->dev;
  
        if (crtc->fb == NULL)
                return;
  
 -      obj = to_intel_framebuffer(crtc->fb)->obj;
 -      dev_priv = crtc->dev->dev_private;
 -      wait_event(dev_priv->pending_flip_queue,
 -                 atomic_read(&obj->pending_flip) == 0);
 +      mutex_lock(&dev->struct_mutex);
 +      intel_finish_fb(crtc->fb);
 +      mutex_unlock(&dev->struct_mutex);
  }
  
  static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
@@@ -2998,6 -3381,23 +2998,6 @@@ static void intel_crtc_disable(struct d
        struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
        struct drm_device *dev = crtc->dev;
  
 -      /* Flush any pending WAITs before we disable the pipe. Note that
 -       * we need to drop the struct_mutex in order to acquire it again
 -       * during the lowlevel dpms routines around a couple of the
 -       * operations. It does not look trivial nor desirable to move
 -       * that locking higher. So instead we leave a window for the
 -       * submission of further commands on the fb before we can actually
 -       * disable it. This race with userspace exists anyway, and we can
 -       * only rely on the pipe being disabled by userspace after it
 -       * receives the hotplug notification and has flushed any pending
 -       * batches.
 -       */
 -      if (crtc->fb) {
 -              mutex_lock(&dev->struct_mutex);
 -              intel_finish_fb(crtc->fb);
 -              mutex_unlock(&dev->struct_mutex);
 -      }
 -
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
        assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
        assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
@@@ -3078,17 -3478,15 +3078,20 @@@ static bool intel_crtc_mode_fixup(struc
                        return false;
        }
  
-       /* All interlaced capable intel hw wants timings in frames. */
-       drm_mode_set_crtcinfo(adjusted_mode, 0);
+       /* All interlaced capable intel hw wants timings in frames. Note though
+        * that intel_lvds_mode_fixup does some funny tricks with the crtc
+        * timings, so we need to be careful not to clobber these.*/
+       if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
+               drm_mode_set_crtcinfo(adjusted_mode, 0);
  
        return true;
  }
  
 +static int valleyview_get_display_clock_speed(struct drm_device *dev)
 +{
 +      return 400000; /* FIXME */
 +}
 +
  static int i945_get_display_clock_speed(struct drm_device *dev)
  {
        return 400000;
@@@ -3186,804 -3584,1570 +3189,804 @@@ ironlake_compute_m_n(int bits_per_pixel
        fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
  }
  
 -
 -struct intel_watermark_params {
 -      unsigned long fifo_size;
 -      unsigned long max_wm;
 -      unsigned long default_wm;
 -      unsigned long guard_size;
 -      unsigned long cacheline_size;
 -};
 -
 -/* Pineview has different values for various configs */
 -static const struct intel_watermark_params pineview_display_wm = {
 -      PINEVIEW_DISPLAY_FIFO,
 -      PINEVIEW_MAX_WM,
 -      PINEVIEW_DFT_WM,
 -      PINEVIEW_GUARD_WM,
 -      PINEVIEW_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params pineview_display_hplloff_wm = {
 -      PINEVIEW_DISPLAY_FIFO,
 -      PINEVIEW_MAX_WM,
 -      PINEVIEW_DFT_HPLLOFF_WM,
 -      PINEVIEW_GUARD_WM,
 -      PINEVIEW_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params pineview_cursor_wm = {
 -      PINEVIEW_CURSOR_FIFO,
 -      PINEVIEW_CURSOR_MAX_WM,
 -      PINEVIEW_CURSOR_DFT_WM,
 -      PINEVIEW_CURSOR_GUARD_WM,
 -      PINEVIEW_FIFO_LINE_SIZE,
 -};
 -static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
 -      PINEVIEW_CURSOR_FIFO,
 -      PINEVIEW_CURSOR_MAX_WM,
 -      PINEVIEW_CURSOR_DFT_WM,
 -      PINEVIEW_CURSOR_GUARD_WM,
 -      PINEVIEW_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params g4x_wm_info = {
 -      G4X_FIFO_SIZE,
 -      G4X_MAX_WM,
 -      G4X_MAX_WM,
 -      2,
 -      G4X_FIFO_LINE_SIZE,
 -};
 -static const struct intel_watermark_params g4x_cursor_wm_info = {
 -      I965_CURSOR_FIFO,
 -      I965_CURSOR_MAX_WM,
 -      I965_CURSOR_DFT_WM,
 -      2,
 -      G4X_FIFO_LINE_SIZE,
 -};
 -static const struct intel_watermark_params i965_cursor_wm_info = {
 -      I965_CURSOR_FIFO,
 -      I965_CURSOR_MAX_WM,
 -      I965_CURSOR_DFT_WM,
 -      2,
 -      I915_FIFO_LINE_SIZE,
 -};
 -static const struct intel_watermark_params i945_wm_info = {
 -      I945_FIFO_SIZE,
 -      I915_MAX_WM,
 -      1,
 -      2,
 -      I915_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params i915_wm_info = {
 -      I915_FIFO_SIZE,
 -      I915_MAX_WM,
 -      1,
 -      2,
 -      I915_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params i855_wm_info = {
 -      I855GM_FIFO_SIZE,
 -      I915_MAX_WM,
 -      1,
 -      2,
 -      I830_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params i830_wm_info = {
 -      I830_FIFO_SIZE,
 -      I915_MAX_WM,
 -      1,
 -      2,
 -      I830_FIFO_LINE_SIZE
 -};
 -
 -static const struct intel_watermark_params ironlake_display_wm_info = {
 -      ILK_DISPLAY_FIFO,
 -      ILK_DISPLAY_MAXWM,
 -      ILK_DISPLAY_DFTWM,
 -      2,
 -      ILK_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params ironlake_cursor_wm_info = {
 -      ILK_CURSOR_FIFO,
 -      ILK_CURSOR_MAXWM,
 -      ILK_CURSOR_DFTWM,
 -      2,
 -      ILK_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params ironlake_display_srwm_info = {
 -      ILK_DISPLAY_SR_FIFO,
 -      ILK_DISPLAY_MAX_SRWM,
 -      ILK_DISPLAY_DFT_SRWM,
 -      2,
 -      ILK_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params ironlake_cursor_srwm_info = {
 -      ILK_CURSOR_SR_FIFO,
 -      ILK_CURSOR_MAX_SRWM,
 -      ILK_CURSOR_DFT_SRWM,
 -      2,
 -      ILK_FIFO_LINE_SIZE
 -};
 -
 -static const struct intel_watermark_params sandybridge_display_wm_info = {
 -      SNB_DISPLAY_FIFO,
 -      SNB_DISPLAY_MAXWM,
 -      SNB_DISPLAY_DFTWM,
 -      2,
 -      SNB_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params sandybridge_cursor_wm_info = {
 -      SNB_CURSOR_FIFO,
 -      SNB_CURSOR_MAXWM,
 -      SNB_CURSOR_DFTWM,
 -      2,
 -      SNB_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params sandybridge_display_srwm_info = {
 -      SNB_DISPLAY_SR_FIFO,
 -      SNB_DISPLAY_MAX_SRWM,
 -      SNB_DISPLAY_DFT_SRWM,
 -      2,
 -      SNB_FIFO_LINE_SIZE
 -};
 -static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
 -      SNB_CURSOR_SR_FIFO,
 -      SNB_CURSOR_MAX_SRWM,
 -      SNB_CURSOR_DFT_SRWM,
 -      2,
 -      SNB_FIFO_LINE_SIZE
 -};
 -
 +static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 +{
 +      if (i915_panel_use_ssc >= 0)
 +              return i915_panel_use_ssc != 0;
 +      return dev_priv->lvds_use_ssc
 +              && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 +}
  
  /**
 - * intel_calculate_wm - calculate watermark level
 - * @clock_in_khz: pixel clock
 - * @wm: chip FIFO params
 - * @pixel_size: display pixel size
 - * @latency_ns: memory latency for the platform
 + * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 + * @crtc: CRTC structure
 + * @mode: requested mode
   *
 - * Calculate the watermark level (the level at which the display plane will
 - * start fetching from memory again).  Each chip has a different display
 - * FIFO size and allocation, so the caller needs to figure that out and pass
 - * in the correct intel_watermark_params structure.
 + * A pipe may be connected to one or more outputs.  Based on the depth of the
 + * attached framebuffer, choose a good color depth to use on the pipe.
 + *
 + * If possible, match the pipe depth to the fb depth.  In some cases, this
 + * isn't ideal, because the connected output supports a lesser or restricted
 + * set of depths.  Resolve that here:
 + *    LVDS typically supports only 6bpc, so clamp down in that case
 + *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 + *    Displays may support a restricted set as well, check EDID and clamp as
 + *      appropriate.
 + *    DP may want to dither down to 6bpc to fit larger modes
   *
 - * As the pixel clock runs, the FIFO will be drained at a rate that depends
 - * on the pixel size.  When it reaches the watermark level, it'll start
 - * fetching FIFO line sized based chunks from memory until the FIFO fills
 - * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 - * will occur, and a display engine hang could result.
 + * RETURNS:
 + * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 + * true if they don't match).
   */
 -static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
 -                                      const struct intel_watermark_params *wm,
 -                                      int fifo_size,
 -                                      int pixel_size,
 -                                      unsigned long latency_ns)
 +static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 +                                       unsigned int *pipe_bpp,
 +                                       struct drm_display_mode *mode)
  {
 -      long entries_required, wm_size;
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_encoder *encoder;
 +      struct drm_connector *connector;
 +      unsigned int display_bpc = UINT_MAX, bpc;
  
 -      /*
 -       * Note: we need to make sure we don't overflow for various clock &
 -       * latency values.
 -       * clocks go from a few thousand to several hundred thousand.
 -       * latency is usually a few thousand
 -       */
 -      entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
 -              1000;
 -      entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
 +      /* Walk the encoders & connectors on this crtc, get min bpc */
 +      list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 +              struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
  
 -      DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
 +              if (encoder->crtc != crtc)
 +                      continue;
  
 -      wm_size = fifo_size - (entries_required + wm->guard_size);
 +              if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
 +                      unsigned int lvds_bpc;
  
 -      DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
 +                      if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
 +                          LVDS_A3_POWER_UP)
 +                              lvds_bpc = 8;
 +                      else
 +                              lvds_bpc = 6;
  
 -      /* Don't promote wm_size to unsigned... */
 -      if (wm_size > (long)wm->max_wm)
 -              wm_size = wm->max_wm;
 -      if (wm_size <= 0)
 -              wm_size = wm->default_wm;
 -      return wm_size;
 -}
 +                      if (lvds_bpc < display_bpc) {
 +                              DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
 +                              display_bpc = lvds_bpc;
 +                      }
 +                      continue;
 +              }
  
 -struct cxsr_latency {
 -      int is_desktop;
 -      int is_ddr3;
 -      unsigned long fsb_freq;
 -      unsigned long mem_freq;
 -      unsigned long display_sr;
 -      unsigned long display_hpll_disable;
 -      unsigned long cursor_sr;
 -      unsigned long cursor_hpll_disable;
 -};
 +              if (intel_encoder->type == INTEL_OUTPUT_EDP) {
 +                      /* Use VBT settings if we have an eDP panel */
 +                      unsigned int edp_bpc = dev_priv->edp.bpp / 3;
  
 -static const struct cxsr_latency cxsr_latency_table[] = {
 -      {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
 -      {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
 -      {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
 -      {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
 -      {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
 -
 -      {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
 -      {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
 -      {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
 -      {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
 -      {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
 -
 -      {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
 -      {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
 -      {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
 -      {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
 -      {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
 -
 -      {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
 -      {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
 -      {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
 -      {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
 -      {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
 -
 -      {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
 -      {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
 -      {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
 -      {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
 -      {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
 -
 -      {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
 -      {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
 -      {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
 -      {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
 -      {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
 -};
 +                      if (edp_bpc < display_bpc) {
 +                              DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
 +                              display_bpc = edp_bpc;
 +                      }
 +                      continue;
 +              }
  
 -static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
 -                                                       int is_ddr3,
 -                                                       int fsb,
 -                                                       int mem)
 -{
 -      const struct cxsr_latency *latency;
 -      int i;
 +              /* Not one of the known troublemakers, check the EDID */
 +              list_for_each_entry(connector, &dev->mode_config.connector_list,
 +                                  head) {
 +                      if (connector->encoder != encoder)
 +                              continue;
  
 -      if (fsb == 0 || mem == 0)
 -              return NULL;
 +                      /* Don't use an invalid EDID bpc value */
 +                      if (connector->display_info.bpc &&
 +                          connector->display_info.bpc < display_bpc) {
 +                              DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
 +                              display_bpc = connector->display_info.bpc;
 +                      }
 +              }
  
 -      for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
 -              latency = &cxsr_latency_table[i];
 -              if (is_desktop == latency->is_desktop &&
 -                  is_ddr3 == latency->is_ddr3 &&
 -                  fsb == latency->fsb_freq && mem == latency->mem_freq)
 -                      return latency;
 +              /*
 +               * HDMI is either 12 or 8, so if the display lets 10bpc sneak
 +               * through, clamp it down.  (Note: >12bpc will be caught below.)
 +               */
 +              if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
 +                      if (display_bpc > 8 && display_bpc < 12) {
 +                              DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
 +                              display_bpc = 12;
 +                      } else {
 +                              DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
 +                              display_bpc = 8;
 +                      }
 +              }
        }
  
 -      DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
 -
 -      return NULL;
 -}
 -
 -static void pineview_disable_cxsr(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
 +              DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
 +              display_bpc = 6;
 +      }
  
 -      /* deactivate cxsr */
 -      I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
 -}
 +      /*
 +       * We could just drive the pipe at the highest bpc all the time and
 +       * enable dithering as needed, but that costs bandwidth.  So choose
 +       * the minimum value that expresses the full color range of the fb but
 +       * also stays within the max display bpc discovered above.
 +       */
  
 -/*
 - * Latency for FIFO fetches is dependent on several factors:
 - *   - memory configuration (speed, channels)
 - *   - chipset
 - *   - current MCH state
 - * It can be fairly high in some situations, so here we assume a fairly
 - * pessimal value.  It's a tradeoff between extra memory fetches (if we
 - * set this value too high, the FIFO will fetch frequently to stay full)
 - * and power consumption (set it too low to save power and we might see
 - * FIFO underruns and display "flicker").
 - *
 - * A value of 5us seems to be a good balance; safe for very low end
 - * platforms but not overly aggressive on lower latency configs.
 - */
 -static const int latency_ns = 5000;
 +      switch (crtc->fb->depth) {
 +      case 8:
 +              bpc = 8; /* since we go through a colormap */
 +              break;
 +      case 15:
 +      case 16:
 +              bpc = 6; /* min is 18bpp */
 +              break;
 +      case 24:
 +              bpc = 8;
 +              break;
 +      case 30:
 +              bpc = 10;
 +              break;
 +      case 48:
 +              bpc = 12;
 +              break;
 +      default:
 +              DRM_DEBUG("unsupported depth, assuming 24 bits\n");
 +              bpc = min((unsigned int)8, display_bpc);
 +              break;
 +      }
  
 -static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      uint32_t dsparb = I915_READ(DSPARB);
 -      int size;
 +      display_bpc = min(display_bpc, bpc);
  
 -      size = dsparb & 0x7f;
 -      if (plane)
 -              size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
 +      DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
 +                    bpc, display_bpc);
  
 -      DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
 -                    plane ? "B" : "A", size);
 +      *pipe_bpp = display_bpc * 3;
  
 -      return size;
 +      return display_bpc != bpc;
  }
  
 -static int i85x_get_fifo_size(struct drm_device *dev, int plane)
 +static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
  {
 +      struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      uint32_t dsparb = I915_READ(DSPARB);
 -      int size;
 -
 -      size = dsparb & 0x1ff;
 -      if (plane)
 -              size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
 -      size >>= 1; /* Convert to cachelines */
 +      int refclk;
  
 -      DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
 -                    plane ? "B" : "A", size);
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 +          intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 +              refclk = dev_priv->lvds_ssc_freq * 1000;
 +              DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
 +                            refclk / 1000);
 +      } else if (!IS_GEN2(dev)) {
 +              refclk = 96000;
 +      } else {
 +              refclk = 48000;
 +      }
  
 -      return size;
 +      return refclk;
  }
  
 -static int i845_get_fifo_size(struct drm_device *dev, int plane)
 +static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
 +                                    intel_clock_t *clock)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      uint32_t dsparb = I915_READ(DSPARB);
 -      int size;
 -
 -      size = dsparb & 0x7f;
 -      size >>= 2; /* Convert to cachelines */
 -
 -      DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
 -                    plane ? "B" : "A",
 -                    size);
 -
 -      return size;
 +      /* SDVO TV has fixed PLL values depend on its clock range,
 +         this mirrors vbios setting. */
 +      if (adjusted_mode->clock >= 100000
 +          && adjusted_mode->clock < 140500) {
 +              clock->p1 = 2;
 +              clock->p2 = 10;
 +              clock->n = 3;
 +              clock->m1 = 16;
 +              clock->m2 = 8;
 +      } else if (adjusted_mode->clock >= 140500
 +                 && adjusted_mode->clock <= 200000) {
 +              clock->p1 = 1;
 +              clock->p2 = 10;
 +              clock->n = 6;
 +              clock->m1 = 12;
 +              clock->m2 = 8;
 +      }
  }
  
 -static int i830_get_fifo_size(struct drm_device *dev, int plane)
 +static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
 +                                   intel_clock_t *clock,
 +                                   intel_clock_t *reduced_clock)
  {
 +      struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      uint32_t dsparb = I915_READ(DSPARB);
 -      int size;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      u32 fp, fp2 = 0;
  
 -      size = dsparb & 0x7f;
 -      size >>= 1; /* Convert to cachelines */
 +      if (IS_PINEVIEW(dev)) {
 +              fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
 +              if (reduced_clock)
 +                      fp2 = (1 << reduced_clock->n) << 16 |
 +                              reduced_clock->m1 << 8 | reduced_clock->m2;
 +      } else {
 +              fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
 +              if (reduced_clock)
 +                      fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
 +                              reduced_clock->m2;
 +      }
  
 -      DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
 -                    plane ? "B" : "A", size);
 +      I915_WRITE(FP0(pipe), fp);
  
 -      return size;
 +      intel_crtc->lowfreq_avail = false;
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 +          reduced_clock && i915_powersave) {
 +              I915_WRITE(FP1(pipe), fp2);
 +              intel_crtc->lowfreq_avail = true;
 +      } else {
 +              I915_WRITE(FP1(pipe), fp);
 +      }
  }
  
 -static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
 +static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
 +                            struct drm_display_mode *adjusted_mode)
  {
 -      struct drm_crtc *crtc, *enabled = NULL;
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      u32 temp;
  
 -      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 -              if (crtc->enabled && crtc->fb) {
 -                      if (enabled)
 -                              return NULL;
 -                      enabled = crtc;
 -              }
 -      }
 -
 -      return enabled;
 -}
 -
 -static void pineview_update_wm(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_crtc *crtc;
 -      const struct cxsr_latency *latency;
 -      u32 reg;
 -      unsigned long wm;
 -
 -      latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
 -                                       dev_priv->fsb_freq, dev_priv->mem_freq);
 -      if (!latency) {
 -              DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
 -              pineview_disable_cxsr(dev);
 -              return;
 -      }
 -
 -      crtc = single_enabled_crtc(dev);
 -      if (crtc) {
 -              int clock = crtc->mode.clock;
 -              int pixel_size = crtc->fb->bits_per_pixel / 8;
 -
 -              /* Display SR */
 -              wm = intel_calculate_wm(clock, &pineview_display_wm,
 -                                      pineview_display_wm.fifo_size,
 -                                      pixel_size, latency->display_sr);
 -              reg = I915_READ(DSPFW1);
 -              reg &= ~DSPFW_SR_MASK;
 -              reg |= wm << DSPFW_SR_SHIFT;
 -              I915_WRITE(DSPFW1, reg);
 -              DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
 -
 -              /* cursor SR */
 -              wm = intel_calculate_wm(clock, &pineview_cursor_wm,
 -                                      pineview_display_wm.fifo_size,
 -                                      pixel_size, latency->cursor_sr);
 -              reg = I915_READ(DSPFW3);
 -              reg &= ~DSPFW_CURSOR_SR_MASK;
 -              reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
 -              I915_WRITE(DSPFW3, reg);
 -
 -              /* Display HPLL off SR */
 -              wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
 -                                      pineview_display_hplloff_wm.fifo_size,
 -                                      pixel_size, latency->display_hpll_disable);
 -              reg = I915_READ(DSPFW3);
 -              reg &= ~DSPFW_HPLL_SR_MASK;
 -              reg |= wm & DSPFW_HPLL_SR_MASK;
 -              I915_WRITE(DSPFW3, reg);
 -
 -              /* cursor HPLL off SR */
 -              wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
 -                                      pineview_display_hplloff_wm.fifo_size,
 -                                      pixel_size, latency->cursor_hpll_disable);
 -              reg = I915_READ(DSPFW3);
 -              reg &= ~DSPFW_HPLL_CURSOR_MASK;
 -              reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
 -              I915_WRITE(DSPFW3, reg);
 -              DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 -
 -              /* activate cxsr */
 -              I915_WRITE(DSPFW3,
 -                         I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
 -              DRM_DEBUG_KMS("Self-refresh is enabled\n");
 +      temp = I915_READ(LVDS);
 +      temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 +      if (pipe == 1) {
 +              temp |= LVDS_PIPEB_SELECT;
        } else {
 -              pineview_disable_cxsr(dev);
 -              DRM_DEBUG_KMS("Self-refresh is disabled\n");
 +              temp &= ~LVDS_PIPEB_SELECT;
        }
 -}
 +      /* set the corresponsding LVDS_BORDER bit */
 +      temp |= dev_priv->lvds_border_bits;
 +      /* Set the B0-B3 data pairs corresponding to whether we're going to
 +       * set the DPLLs for dual-channel mode or not.
 +       */
 +      if (clock->p2 == 7)
 +              temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
 +      else
 +              temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
  
 -static bool g4x_compute_wm0(struct drm_device *dev,
 -                          int plane,
 -                          const struct intel_watermark_params *display,
 -                          int display_latency_ns,
 -                          const struct intel_watermark_params *cursor,
 -                          int cursor_latency_ns,
 -                          int *plane_wm,
 -                          int *cursor_wm)
 -{
 -      struct drm_crtc *crtc;
 -      int htotal, hdisplay, clock, pixel_size;
 -      int line_time_us, line_count;
 -      int entries, tlb_miss;
 -
 -      crtc = intel_get_crtc_for_plane(dev, plane);
 -      if (crtc->fb == NULL || !crtc->enabled) {
 -              *cursor_wm = cursor->guard_size;
 -              *plane_wm = display->guard_size;
 -              return false;
 +      /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
 +       * appropriately here, but we need to look more thoroughly into how
 +       * panels behave in the two modes.
 +       */
 +      /* set the dithering flag on LVDS as needed */
 +      if (INTEL_INFO(dev)->gen >= 4) {
 +              if (dev_priv->lvds_dither)
 +                      temp |= LVDS_ENABLE_DITHER;
 +              else
 +                      temp &= ~LVDS_ENABLE_DITHER;
        }
 -
 -      htotal = crtc->mode.htotal;
 -      hdisplay = crtc->mode.hdisplay;
 -      clock = crtc->mode.clock;
 -      pixel_size = crtc->fb->bits_per_pixel / 8;
 -
 -      /* Use the small buffer method to calculate plane watermark */
 -      entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
 -      tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
 -      if (tlb_miss > 0)
 -              entries += tlb_miss;
 -      entries = DIV_ROUND_UP(entries, display->cacheline_size);
 -      *plane_wm = entries + display->guard_size;
 -      if (*plane_wm > (int)display->max_wm)
 -              *plane_wm = display->max_wm;
 -
 -      /* Use the large buffer method to calculate cursor watermark */
 -      line_time_us = ((htotal * 1000) / clock);
 -      line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
 -      entries = line_count * 64 * pixel_size;
 -      tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
 -      if (tlb_miss > 0)
 -              entries += tlb_miss;
 -      entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
 -      *cursor_wm = entries + cursor->guard_size;
 -      if (*cursor_wm > (int)cursor->max_wm)
 -              *cursor_wm = (int)cursor->max_wm;
 -
 -      return true;
 +      temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
 +      if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
 +              temp |= LVDS_HSYNC_POLARITY;
 +      if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
 +              temp |= LVDS_VSYNC_POLARITY;
 +      I915_WRITE(LVDS, temp);
  }
  
 -/*
 - * Check the wm result.
 - *
 - * If any calculated watermark values is larger than the maximum value that
 - * can be programmed into the associated watermark register, that watermark
 - * must be disabled.
 - */
 -static bool g4x_check_srwm(struct drm_device *dev,
 -                         int display_wm, int cursor_wm,
 -                         const struct intel_watermark_params *display,
 -                         const struct intel_watermark_params *cursor)
 +static void i9xx_update_pll(struct drm_crtc *crtc,
 +                          struct drm_display_mode *mode,
 +                          struct drm_display_mode *adjusted_mode,
 +                          intel_clock_t *clock, intel_clock_t *reduced_clock,
 +                          int num_connectors)
  {
 -      DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
 -                    display_wm, cursor_wm);
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      u32 dpll;
 +      bool is_sdvo;
  
 -      if (display_wm > display->max_wm) {
 -              DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
 -                            display_wm, display->max_wm);
 -              return false;
 -      }
 +      is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
 +              intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
  
 -      if (cursor_wm > cursor->max_wm) {
 -              DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
 -                            cursor_wm, cursor->max_wm);
 -              return false;
 -      }
 +      dpll = DPLL_VGA_MODE_DIS;
  
 -      if (!(display_wm || cursor_wm)) {
 -              DRM_DEBUG_KMS("SR latency is 0, disabling\n");
 -              return false;
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 +              dpll |= DPLLB_MODE_LVDS;
 +      else
 +              dpll |= DPLLB_MODE_DAC_SERIAL;
 +      if (is_sdvo) {
 +              int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 +              if (pixel_multiplier > 1) {
 +                      if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 +                              dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
 +              }
 +              dpll |= DPLL_DVO_HIGH_SPEED;
        }
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
 +              dpll |= DPLL_DVO_HIGH_SPEED;
  
 -      return true;
 -}
 -
 -static bool g4x_compute_srwm(struct drm_device *dev,
 -                           int plane,
 -                           int latency_ns,
 -                           const struct intel_watermark_params *display,
 -                           const struct intel_watermark_params *cursor,
 -                           int *display_wm, int *cursor_wm)
 -{
 -      struct drm_crtc *crtc;
 -      int hdisplay, htotal, pixel_size, clock;
 -      unsigned long line_time_us;
 -      int line_count, line_size;
 -      int small, large;
 -      int entries;
 -
 -      if (!latency_ns) {
 -              *display_wm = *cursor_wm = 0;
 -              return false;
 +      /* compute bitmask from p1 value */
 +      if (IS_PINEVIEW(dev))
 +              dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
 +      else {
 +              dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 +              if (IS_G4X(dev) && reduced_clock)
 +                      dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
        }
 +      switch (clock->p2) {
 +      case 5:
 +              dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
 +              break;
 +      case 7:
 +              dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
 +              break;
 +      case 10:
 +              dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
 +              break;
 +      case 14:
 +              dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
 +              break;
 +      }
 +      if (INTEL_INFO(dev)->gen >= 4)
 +              dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
  
 -      crtc = intel_get_crtc_for_plane(dev, plane);
 -      hdisplay = crtc->mode.hdisplay;
 -      htotal = crtc->mode.htotal;
 -      clock = crtc->mode.clock;
 -      pixel_size = crtc->fb->bits_per_pixel / 8;
 -
 -      line_time_us = (htotal * 1000) / clock;
 -      line_count = (latency_ns / line_time_us + 1000) / 1000;
 -      line_size = hdisplay * pixel_size;
 +      if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
 +              dpll |= PLL_REF_INPUT_TVCLKINBC;
 +      else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
 +              /* XXX: just matching BIOS for now */
 +              /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
 +              dpll |= 3;
 +      else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 +               intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 +              dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 +      else
 +              dpll |= PLL_REF_INPUT_DREFCLK;
  
 -      /* Use the minimum of the small and large buffer method for primary */
 -      small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
 -      large = line_count * line_size;
 +      dpll |= DPLL_VCO_ENABLE;
 +      I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
 +      POSTING_READ(DPLL(pipe));
 +      udelay(150);
  
 -      entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
 -      *display_wm = entries + display->guard_size;
 +      /* The LVDS pin pair needs to be on before the DPLLs are enabled.
 +       * This is an exception to the general rule that mode_set doesn't turn
 +       * things on.
 +       */
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 +              intel_update_lvds(crtc, clock, adjusted_mode);
  
 -      /* calculate the self-refresh watermark for display cursor */
 -      entries = line_count * pixel_size * 64;
 -      entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
 -      *cursor_wm = entries + cursor->guard_size;
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
 +              intel_dp_set_m_n(crtc, mode, adjusted_mode);
  
 -      return g4x_check_srwm(dev,
 -                            *display_wm, *cursor_wm,
 -                            display, cursor);
 -}
 +      I915_WRITE(DPLL(pipe), dpll);
  
 -#define single_plane_enabled(mask) is_power_of_2(mask)
 +      /* Wait for the clocks to stabilize. */
 +      POSTING_READ(DPLL(pipe));
 +      udelay(150);
  
 -static void g4x_update_wm(struct drm_device *dev)
 -{
 -      static const int sr_latency_ns = 12000;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
 -      int plane_sr, cursor_sr;
 -      unsigned int enabled = 0;
 -
 -      if (g4x_compute_wm0(dev, 0,
 -                          &g4x_wm_info, latency_ns,
 -                          &g4x_cursor_wm_info, latency_ns,
 -                          &planea_wm, &cursora_wm))
 -              enabled |= 1;
 -
 -      if (g4x_compute_wm0(dev, 1,
 -                          &g4x_wm_info, latency_ns,
 -                          &g4x_cursor_wm_info, latency_ns,
 -                          &planeb_wm, &cursorb_wm))
 -              enabled |= 2;
 -
 -      plane_sr = cursor_sr = 0;
 -      if (single_plane_enabled(enabled) &&
 -          g4x_compute_srwm(dev, ffs(enabled) - 1,
 -                           sr_latency_ns,
 -                           &g4x_wm_info,
 -                           &g4x_cursor_wm_info,
 -                           &plane_sr, &cursor_sr))
 -              I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
 -      else
 -              I915_WRITE(FW_BLC_SELF,
 -                         I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
 -
 -      DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
 -                    planea_wm, cursora_wm,
 -                    planeb_wm, cursorb_wm,
 -                    plane_sr, cursor_sr);
 -
 -      I915_WRITE(DSPFW1,
 -                 (plane_sr << DSPFW_SR_SHIFT) |
 -                 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
 -                 (planeb_wm << DSPFW_PLANEB_SHIFT) |
 -                 planea_wm);
 -      I915_WRITE(DSPFW2,
 -                 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
 -                 (cursora_wm << DSPFW_CURSORA_SHIFT));
 -      /* HPLL off in SR has some issues on G4x... disable it */
 -      I915_WRITE(DSPFW3,
 -                 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
 -                 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 -}
 -
 -static void i965_update_wm(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_crtc *crtc;
 -      int srwm = 1;
 -      int cursor_sr = 16;
 -
 -      /* Calc sr entries for one plane configs */
 -      crtc = single_enabled_crtc(dev);
 -      if (crtc) {
 -              /* self-refresh has much higher latency */
 -              static const int sr_latency_ns = 12000;
 -              int clock = crtc->mode.clock;
 -              int htotal = crtc->mode.htotal;
 -              int hdisplay = crtc->mode.hdisplay;
 -              int pixel_size = crtc->fb->bits_per_pixel / 8;
 -              unsigned long line_time_us;
 -              int entries;
 -
 -              line_time_us = ((htotal * 1000) / clock);
 -
 -              /* Use ns/us then divide to preserve precision */
 -              entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
 -                      pixel_size * hdisplay;
 -              entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
 -              srwm = I965_FIFO_SIZE - entries;
 -              if (srwm < 0)
 -                      srwm = 1;
 -              srwm &= 0x1ff;
 -              DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
 -                            entries, srwm);
 -
 -              entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
 -                      pixel_size * 64;
 -              entries = DIV_ROUND_UP(entries,
 -                                        i965_cursor_wm_info.cacheline_size);
 -              cursor_sr = i965_cursor_wm_info.fifo_size -
 -                      (entries + i965_cursor_wm_info.guard_size);
 -
 -              if (cursor_sr > i965_cursor_wm_info.max_wm)
 -                      cursor_sr = i965_cursor_wm_info.max_wm;
 -
 -              DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
 -                            "cursor %d\n", srwm, cursor_sr);
 -
 -              if (IS_CRESTLINE(dev))
 -                      I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
 +      if (INTEL_INFO(dev)->gen >= 4) {
 +              u32 temp = 0;
 +              if (is_sdvo) {
 +                      temp = intel_mode_get_pixel_multiplier(adjusted_mode);
 +                      if (temp > 1)
 +                              temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 +                      else
 +                              temp = 0;
 +              }
 +              I915_WRITE(DPLL_MD(pipe), temp);
        } else {
 -              /* Turn off self refresh if both pipes are enabled */
 -              if (IS_CRESTLINE(dev))
 -                      I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
 -                                 & ~FW_BLC_SELF_EN);
 +              /* The pixel multiplier can only be updated once the
 +               * DPLL is enabled and the clocks are stable.
 +               *
 +               * So write it again.
 +               */
 +              I915_WRITE(DPLL(pipe), dpll);
        }
 -
 -      DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
 -                    srwm);
 -
 -      /* 965 has limitations... */
 -      I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
 -                 (8 << 16) | (8 << 8) | (8 << 0));
 -      I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
 -      /* update cursor SR watermark */
 -      I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
  }
  
 -static void i9xx_update_wm(struct drm_device *dev)
 +static void i8xx_update_pll(struct drm_crtc *crtc,
 +                          struct drm_display_mode *adjusted_mode,
 +                          intel_clock_t *clock,
 +                          int num_connectors)
  {
 +      struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      const struct intel_watermark_params *wm_info;
 -      uint32_t fwater_lo;
 -      uint32_t fwater_hi;
 -      int cwm, srwm = 1;
 -      int fifo_size;
 -      int planea_wm, planeb_wm;
 -      struct drm_crtc *crtc, *enabled = NULL;
 -
 -      if (IS_I945GM(dev))
 -              wm_info = &i945_wm_info;
 -      else if (!IS_GEN2(dev))
 -              wm_info = &i915_wm_info;
 -      else
 -              wm_info = &i855_wm_info;
 -
 -      fifo_size = dev_priv->display.get_fifo_size(dev, 0);
 -      crtc = intel_get_crtc_for_plane(dev, 0);
 -      if (crtc->enabled && crtc->fb) {
 -              planea_wm = intel_calculate_wm(crtc->mode.clock,
 -                                             wm_info, fifo_size,
 -                                             crtc->fb->bits_per_pixel / 8,
 -                                             latency_ns);
 -              enabled = crtc;
 -      } else
 -              planea_wm = fifo_size - wm_info->guard_size;
 -
 -      fifo_size = dev_priv->display.get_fifo_size(dev, 1);
 -      crtc = intel_get_crtc_for_plane(dev, 1);
 -      if (crtc->enabled && crtc->fb) {
 -              planeb_wm = intel_calculate_wm(crtc->mode.clock,
 -                                             wm_info, fifo_size,
 -                                             crtc->fb->bits_per_pixel / 8,
 -                                             latency_ns);
 -              if (enabled == NULL)
 -                      enabled = crtc;
 -              else
 -                      enabled = NULL;
 -      } else
 -              planeb_wm = fifo_size - wm_info->guard_size;
 -
 -      DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      u32 dpll;
  
 -      /*
 -       * Overlay gets an aggressive default since video jitter is bad.
 -       */
 -      cwm = 2;
 +      dpll = DPLL_VGA_MODE_DIS;
  
 -      /* Play safe and disable self-refresh before adjusting watermarks. */
 -      if (IS_I945G(dev) || IS_I945GM(dev))
 -              I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
 -      else if (IS_I915GM(dev))
 -              I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
 -
 -      /* Calc sr entries for one plane configs */
 -      if (HAS_FW_BLC(dev) && enabled) {
 -              /* self-refresh has much higher latency */
 -              static const int sr_latency_ns = 6000;
 -              int clock = enabled->mode.clock;
 -              int htotal = enabled->mode.htotal;
 -              int hdisplay = enabled->mode.hdisplay;
 -              int pixel_size = enabled->fb->bits_per_pixel / 8;
 -              unsigned long line_time_us;
 -              int entries;
 -
 -              line_time_us = (htotal * 1000) / clock;
 -
 -              /* Use ns/us then divide to preserve precision */
 -              entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
 -                      pixel_size * hdisplay;
 -              entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
 -              DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
 -              srwm = wm_info->fifo_size - entries;
 -              if (srwm < 0)
 -                      srwm = 1;
 -
 -              if (IS_I945G(dev) || IS_I945GM(dev))
 -                      I915_WRITE(FW_BLC_SELF,
 -                                 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
 -              else if (IS_I915GM(dev))
 -                      I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
 -      }
 -
 -      DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
 -                    planea_wm, planeb_wm, cwm, srwm);
 -
 -      fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
 -      fwater_hi = (cwm & 0x1f);
 -
 -      /* Set request length to 8 cachelines per fetch */
 -      fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
 -      fwater_hi = fwater_hi | (1 << 8);
 -
 -      I915_WRITE(FW_BLC, fwater_lo);
 -      I915_WRITE(FW_BLC2, fwater_hi);
 -
 -      if (HAS_FW_BLC(dev)) {
 -              if (enabled) {
 -                      if (IS_I945G(dev) || IS_I945GM(dev))
 -                              I915_WRITE(FW_BLC_SELF,
 -                                         FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
 -                      else if (IS_I915GM(dev))
 -                              I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
 -                      DRM_DEBUG_KMS("memory self refresh enabled\n");
 -              } else
 -                      DRM_DEBUG_KMS("memory self refresh disabled\n");
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 +              dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 +      } else {
 +              if (clock->p1 == 2)
 +                      dpll |= PLL_P1_DIVIDE_BY_TWO;
 +              else
 +                      dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 +              if (clock->p2 == 4)
 +                      dpll |= PLL_P2_DIVIDE_BY_4;
        }
 -}
  
 -static void i830_update_wm(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_crtc *crtc;
 -      uint32_t fwater_lo;
 -      int planea_wm;
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
 +              /* XXX: just matching BIOS for now */
 +              /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
 +              dpll |= 3;
 +      else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 +               intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 +              dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 +      else
 +              dpll |= PLL_REF_INPUT_DREFCLK;
  
 -      crtc = single_enabled_crtc(dev);
 -      if (crtc == NULL)
 -              return;
 +      dpll |= DPLL_VCO_ENABLE;
 +      I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
 +      POSTING_READ(DPLL(pipe));
 +      udelay(150);
  
 -      planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
 -                                     dev_priv->display.get_fifo_size(dev, 0),
 -                                     crtc->fb->bits_per_pixel / 8,
 -                                     latency_ns);
 -      fwater_lo = I915_READ(FW_BLC) & ~0xfff;
 -      fwater_lo |= (3<<8) | planea_wm;
 +      I915_WRITE(DPLL(pipe), dpll);
  
 -      DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
 +      /* Wait for the clocks to stabilize. */
 +      POSTING_READ(DPLL(pipe));
 +      udelay(150);
  
 -      I915_WRITE(FW_BLC, fwater_lo);
 -}
 +      /* The LVDS pin pair needs to be on before the DPLLs are enabled.
 +       * This is an exception to the general rule that mode_set doesn't turn
 +       * things on.
 +       */
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 +              intel_update_lvds(crtc, clock, adjusted_mode);
  
 -#define ILK_LP0_PLANE_LATENCY         700
 -#define ILK_LP0_CURSOR_LATENCY                1300
 +      /* The pixel multiplier can only be updated once the
 +       * DPLL is enabled and the clocks are stable.
 +       *
 +       * So write it again.
 +       */
 +      I915_WRITE(DPLL(pipe), dpll);
 +}
  
 -/*
 - * Check the wm result.
 - *
 - * If any calculated watermark values is larger than the maximum value that
 - * can be programmed into the associated watermark register, that watermark
 - * must be disabled.
 - */
 -static bool ironlake_check_srwm(struct drm_device *dev, int level,
 -                              int fbc_wm, int display_wm, int cursor_wm,
 -                              const struct intel_watermark_params *display,
 -                              const struct intel_watermark_params *cursor)
 +static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 +                            struct drm_display_mode *mode,
 +                            struct drm_display_mode *adjusted_mode,
 +                            int x, int y,
 +                            struct drm_framebuffer *old_fb)
  {
 +      struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      int plane = intel_crtc->plane;
 +      int refclk, num_connectors = 0;
 +      intel_clock_t clock, reduced_clock;
 +      u32 dspcntr, pipeconf, vsyncshift;
 +      bool ok, has_reduced_clock = false, is_sdvo = false;
 +      bool is_lvds = false, is_tv = false, is_dp = false;
 +      struct drm_mode_config *mode_config = &dev->mode_config;
 +      struct intel_encoder *encoder;
 +      const intel_limit_t *limit;
 +      int ret;
  
 -      DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
 -                    " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
 -
 -      if (fbc_wm > SNB_FBC_MAX_SRWM) {
 -              DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
 -                            fbc_wm, SNB_FBC_MAX_SRWM, level);
 +      list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
 +              if (encoder->base.crtc != crtc)
 +                      continue;
  
 -              /* fbc has it's own way to disable FBC WM */
 -              I915_WRITE(DISP_ARB_CTL,
 -                         I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
 -              return false;
 -      }
 +              switch (encoder->type) {
 +              case INTEL_OUTPUT_LVDS:
 +                      is_lvds = true;
 +                      break;
 +              case INTEL_OUTPUT_SDVO:
 +              case INTEL_OUTPUT_HDMI:
 +                      is_sdvo = true;
 +                      if (encoder->needs_tv_clock)
 +                              is_tv = true;
 +                      break;
 +              case INTEL_OUTPUT_TVOUT:
 +                      is_tv = true;
 +                      break;
 +              case INTEL_OUTPUT_DISPLAYPORT:
 +                      is_dp = true;
 +                      break;
 +              }
  
 -      if (display_wm > display->max_wm) {
 -              DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
 -                            display_wm, SNB_DISPLAY_MAX_SRWM, level);
 -              return false;
 +              num_connectors++;
        }
  
 -      if (cursor_wm > cursor->max_wm) {
 -              DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
 -                            cursor_wm, SNB_CURSOR_MAX_SRWM, level);
 -              return false;
 -      }
 +      refclk = i9xx_get_refclk(crtc, num_connectors);
  
 -      if (!(fbc_wm || display_wm || cursor_wm)) {
 -              DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
 -              return false;
 +      /*
 +       * Returns a set of divisors for the desired target clock with the given
 +       * refclk, or FALSE.  The returned values represent the clock equation:
 +       * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 +       */
 +      limit = intel_limit(crtc, refclk);
 +      ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
 +                           &clock);
 +      if (!ok) {
 +              DRM_ERROR("Couldn't find PLL settings for mode!\n");
 +              return -EINVAL;
        }
  
 -      return true;
 -}
 +      /* Ensure that the cursor is valid for the new mode before changing... */
 +      intel_crtc_update_cursor(crtc, true);
  
 -/*
 - * Compute watermark values of WM[1-3],
 - */
 -static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 -                                int latency_ns,
 -                                const struct intel_watermark_params *display,
 -                                const struct intel_watermark_params *cursor,
 -                                int *fbc_wm, int *display_wm, int *cursor_wm)
 -{
 -      struct drm_crtc *crtc;
 -      unsigned long line_time_us;
 -      int hdisplay, htotal, pixel_size, clock;
 -      int line_count, line_size;
 -      int small, large;
 -      int entries;
 -
 -      if (!latency_ns) {
 -              *fbc_wm = *display_wm = *cursor_wm = 0;
 -              return false;
 +      if (is_lvds && dev_priv->lvds_downclock_avail) {
 +              /*
 +               * Ensure we match the reduced clock's P to the target clock.
 +               * If the clocks don't match, we can't switch the display clock
 +               * by using the FP0/FP1. In such case we will disable the LVDS
 +               * downclock feature.
 +              */
 +              has_reduced_clock = limit->find_pll(limit, crtc,
 +                                                  dev_priv->lvds_downclock,
 +                                                  refclk,
 +                                                  &clock,
 +                                                  &reduced_clock);
        }
  
 -      crtc = intel_get_crtc_for_plane(dev, plane);
 -      hdisplay = crtc->mode.hdisplay;
 -      htotal = crtc->mode.htotal;
 -      clock = crtc->mode.clock;
 -      pixel_size = crtc->fb->bits_per_pixel / 8;
 -
 -      line_time_us = (htotal * 1000) / clock;
 -      line_count = (latency_ns / line_time_us + 1000) / 1000;
 -      line_size = hdisplay * pixel_size;
 +      if (is_sdvo && is_tv)
 +              i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
  
 -      /* Use the minimum of the small and large buffer method for primary */
 -      small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
 -      large = line_count * line_size;
 +      i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
 +                               &reduced_clock : NULL);
  
 -      entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
 -      *display_wm = entries + display->guard_size;
 +      if (IS_GEN2(dev))
 +              i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
 +      else
 +              i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
 +                              has_reduced_clock ? &reduced_clock : NULL,
 +                              num_connectors);
  
 -      /*
 -       * Spec says:
 -       * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
 -       */
 -      *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
 +      /* setup pipeconf */
 +      pipeconf = I915_READ(PIPECONF(pipe));
  
 -      /* calculate the self-refresh watermark for display cursor */
 -      entries = line_count * pixel_size * 64;
 -      entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
 -      *cursor_wm = entries + cursor->guard_size;
 +      /* Set up the display plane register */
 +      dspcntr = DISPPLANE_GAMMA_ENABLE;
  
 -      return ironlake_check_srwm(dev, level,
 -                                 *fbc_wm, *display_wm, *cursor_wm,
 -                                 display, cursor);
 -}
 +      if (pipe == 0)
 +              dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
 +      else
 +              dspcntr |= DISPPLANE_SEL_PIPE_B;
  
 -static void ironlake_update_wm(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      int fbc_wm, plane_wm, cursor_wm;
 -      unsigned int enabled;
 -
 -      enabled = 0;
 -      if (g4x_compute_wm0(dev, 0,
 -                          &ironlake_display_wm_info,
 -                          ILK_LP0_PLANE_LATENCY,
 -                          &ironlake_cursor_wm_info,
 -                          ILK_LP0_CURSOR_LATENCY,
 -                          &plane_wm, &cursor_wm)) {
 -              I915_WRITE(WM0_PIPEA_ILK,
 -                         (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
 -              DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 -                            " plane %d, " "cursor: %d\n",
 -                            plane_wm, cursor_wm);
 -              enabled |= 1;
 -      }
 -
 -      if (g4x_compute_wm0(dev, 1,
 -                          &ironlake_display_wm_info,
 -                          ILK_LP0_PLANE_LATENCY,
 -                          &ironlake_cursor_wm_info,
 -                          ILK_LP0_CURSOR_LATENCY,
 -                          &plane_wm, &cursor_wm)) {
 -              I915_WRITE(WM0_PIPEB_ILK,
 -                         (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
 -              DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 -                            " plane %d, cursor: %d\n",
 -                            plane_wm, cursor_wm);
 -              enabled |= 2;
 +      if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
 +              /* Enable pixel doubling when the dot clock is > 90% of the (display)
 +               * core speed.
 +               *
 +               * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
 +               * pipe == 0 check?
 +               */
 +              if (mode->clock >
 +                  dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
 +                      pipeconf |= PIPECONF_DOUBLE_WIDE;
 +              else
 +                      pipeconf &= ~PIPECONF_DOUBLE_WIDE;
        }
  
 -      /*
 -       * Calculate and update the self-refresh watermark only when one
 -       * display plane is used.
 -       */
 -      I915_WRITE(WM3_LP_ILK, 0);
 -      I915_WRITE(WM2_LP_ILK, 0);
 -      I915_WRITE(WM1_LP_ILK, 0);
 -
 -      if (!single_plane_enabled(enabled))
 -              return;
 -      enabled = ffs(enabled) - 1;
 -
 -      /* WM1 */
 -      if (!ironlake_compute_srwm(dev, 1, enabled,
 -                                 ILK_READ_WM1_LATENCY() * 500,
 -                                 &ironlake_display_srwm_info,
 -                                 &ironlake_cursor_srwm_info,
 -                                 &fbc_wm, &plane_wm, &cursor_wm))
 -              return;
 -
 -      I915_WRITE(WM1_LP_ILK,
 -                 WM1_LP_SR_EN |
 -                 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
 -                 (fbc_wm << WM1_LP_FBC_SHIFT) |
 -                 (plane_wm << WM1_LP_SR_SHIFT) |
 -                 cursor_wm);
 -
 -      /* WM2 */
 -      if (!ironlake_compute_srwm(dev, 2, enabled,
 -                                 ILK_READ_WM2_LATENCY() * 500,
 -                                 &ironlake_display_srwm_info,
 -                                 &ironlake_cursor_srwm_info,
 -                                 &fbc_wm, &plane_wm, &cursor_wm))
 -              return;
 +      /* default to 8bpc */
 +      pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
 +      if (is_dp) {
 +              if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
 +                      pipeconf |= PIPECONF_BPP_6 |
 +                                  PIPECONF_DITHER_EN |
 +                                  PIPECONF_DITHER_TYPE_SP;
 +              }
 +      }
  
 -      I915_WRITE(WM2_LP_ILK,
 -                 WM2_LP_EN |
 -                 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
 -                 (fbc_wm << WM1_LP_FBC_SHIFT) |
 -                 (plane_wm << WM1_LP_SR_SHIFT) |
 -                 cursor_wm);
 +      DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
 +      drm_mode_debug_printmodeline(mode);
  
 -      /*
 -       * WM3 is unsupported on ILK, probably because we don't have latency
 -       * data for that power state
 -       */
 -}
 +      if (HAS_PIPE_CXSR(dev)) {
 +              if (intel_crtc->lowfreq_avail) {
 +                      DRM_DEBUG_KMS("enabling CxSR downclocking\n");
 +                      pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
 +              } else {
 +                      DRM_DEBUG_KMS("disabling CxSR downclocking\n");
 +                      pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
 +              }
 +      }
  
 -void sandybridge_update_wm(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
 -      u32 val;
 -      int fbc_wm, plane_wm, cursor_wm;
 -      unsigned int enabled;
 -
 -      enabled = 0;
 -      if (g4x_compute_wm0(dev, 0,
 -                          &sandybridge_display_wm_info, latency,
 -                          &sandybridge_cursor_wm_info, latency,
 -                          &plane_wm, &cursor_wm)) {
 -              val = I915_READ(WM0_PIPEA_ILK);
 -              val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
 -              I915_WRITE(WM0_PIPEA_ILK, val |
 -                         ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
 -              DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 -                            " plane %d, " "cursor: %d\n",
 -                            plane_wm, cursor_wm);
 -              enabled |= 1;
 -      }
 -
 -      if (g4x_compute_wm0(dev, 1,
 -                          &sandybridge_display_wm_info, latency,
 -                          &sandybridge_cursor_wm_info, latency,
 -                          &plane_wm, &cursor_wm)) {
 -              val = I915_READ(WM0_PIPEB_ILK);
 -              val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
 -              I915_WRITE(WM0_PIPEB_ILK, val |
 -                         ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
 -              DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 -                            " plane %d, cursor: %d\n",
 -                            plane_wm, cursor_wm);
 -              enabled |= 2;
 -      }
 -
 -      /* IVB has 3 pipes */
 -      if (IS_IVYBRIDGE(dev) &&
 -          g4x_compute_wm0(dev, 2,
 -                          &sandybridge_display_wm_info, latency,
 -                          &sandybridge_cursor_wm_info, latency,
 -                          &plane_wm, &cursor_wm)) {
 -              val = I915_READ(WM0_PIPEC_IVB);
 -              val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
 -              I915_WRITE(WM0_PIPEC_IVB, val |
 -                         ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
 -              DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
 -                            " plane %d, cursor: %d\n",
 -                            plane_wm, cursor_wm);
 -              enabled |= 3;
 +      pipeconf &= ~PIPECONF_INTERLACE_MASK;
 +      if (!IS_GEN2(dev) &&
 +          adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 +              pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
 +              /* the chip adds 2 halflines automatically */
 +              adjusted_mode->crtc_vtotal -= 1;
 +              adjusted_mode->crtc_vblank_end -= 1;
 +              vsyncshift = adjusted_mode->crtc_hsync_start
 +                           - adjusted_mode->crtc_htotal/2;
 +      } else {
 +              pipeconf |= PIPECONF_PROGRESSIVE;
 +              vsyncshift = 0;
        }
  
 -      /*
 -       * Calculate and update the self-refresh watermark only when one
 -       * display plane is used.
 -       *
 -       * SNB support 3 levels of watermark.
 -       *
 -       * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
 -       * and disabled in the descending order
 -       *
 -       */
 -      I915_WRITE(WM3_LP_ILK, 0);
 -      I915_WRITE(WM2_LP_ILK, 0);
 -      I915_WRITE(WM1_LP_ILK, 0);
 +      if (!IS_GEN3(dev))
 +              I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
  
 -      if (!single_plane_enabled(enabled) ||
 -          dev_priv->sprite_scaling_enabled)
 -              return;
 -      enabled = ffs(enabled) - 1;
 -
 -      /* WM1 */
 -      if (!ironlake_compute_srwm(dev, 1, enabled,
 -                                 SNB_READ_WM1_LATENCY() * 500,
 -                                 &sandybridge_display_srwm_info,
 -                                 &sandybridge_cursor_srwm_info,
 -                                 &fbc_wm, &plane_wm, &cursor_wm))
 -              return;
 +      I915_WRITE(HTOTAL(pipe),
 +                 (adjusted_mode->crtc_hdisplay - 1) |
 +                 ((adjusted_mode->crtc_htotal - 1) << 16));
 +      I915_WRITE(HBLANK(pipe),
 +                 (adjusted_mode->crtc_hblank_start - 1) |
 +                 ((adjusted_mode->crtc_hblank_end - 1) << 16));
 +      I915_WRITE(HSYNC(pipe),
 +                 (adjusted_mode->crtc_hsync_start - 1) |
 +                 ((adjusted_mode->crtc_hsync_end - 1) << 16));
  
 -      I915_WRITE(WM1_LP_ILK,
 -                 WM1_LP_SR_EN |
 -                 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
 -                 (fbc_wm << WM1_LP_FBC_SHIFT) |
 -                 (plane_wm << WM1_LP_SR_SHIFT) |
 -                 cursor_wm);
 -
 -      /* WM2 */
 -      if (!ironlake_compute_srwm(dev, 2, enabled,
 -                                 SNB_READ_WM2_LATENCY() * 500,
 -                                 &sandybridge_display_srwm_info,
 -                                 &sandybridge_cursor_srwm_info,
 -                                 &fbc_wm, &plane_wm, &cursor_wm))
 -              return;
 +      I915_WRITE(VTOTAL(pipe),
 +                 (adjusted_mode->crtc_vdisplay - 1) |
 +                 ((adjusted_mode->crtc_vtotal - 1) << 16));
 +      I915_WRITE(VBLANK(pipe),
 +                 (adjusted_mode->crtc_vblank_start - 1) |
 +                 ((adjusted_mode->crtc_vblank_end - 1) << 16));
 +      I915_WRITE(VSYNC(pipe),
 +                 (adjusted_mode->crtc_vsync_start - 1) |
 +                 ((adjusted_mode->crtc_vsync_end - 1) << 16));
  
 -      I915_WRITE(WM2_LP_ILK,
 -                 WM2_LP_EN |
 -                 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
 -                 (fbc_wm << WM1_LP_FBC_SHIFT) |
 -                 (plane_wm << WM1_LP_SR_SHIFT) |
 -                 cursor_wm);
 -
 -      /* WM3 */
 -      if (!ironlake_compute_srwm(dev, 3, enabled,
 -                                 SNB_READ_WM3_LATENCY() * 500,
 -                                 &sandybridge_display_srwm_info,
 -                                 &sandybridge_cursor_srwm_info,
 -                                 &fbc_wm, &plane_wm, &cursor_wm))
 -              return;
 +      /* pipesrc and dspsize control the size that is scaled from,
 +       * which should always be the user's requested size.
 +       */
 +      I915_WRITE(DSPSIZE(plane),
 +                 ((mode->vdisplay - 1) << 16) |
 +                 (mode->hdisplay - 1));
 +      I915_WRITE(DSPPOS(plane), 0);
 +      I915_WRITE(PIPESRC(pipe),
 +                 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
  
 -      I915_WRITE(WM3_LP_ILK,
 -                 WM3_LP_EN |
 -                 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
 -                 (fbc_wm << WM1_LP_FBC_SHIFT) |
 -                 (plane_wm << WM1_LP_SR_SHIFT) |
 -                 cursor_wm);
 -}
 +      I915_WRITE(PIPECONF(pipe), pipeconf);
 +      POSTING_READ(PIPECONF(pipe));
 +      intel_enable_pipe(dev_priv, pipe, false);
  
 -static bool
 -sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
 -                            uint32_t sprite_width, int pixel_size,
 -                            const struct intel_watermark_params *display,
 -                            int display_latency_ns, int *sprite_wm)
 -{
 -      struct drm_crtc *crtc;
 -      int clock;
 -      int entries, tlb_miss;
 +      intel_wait_for_vblank(dev, pipe);
  
 -      crtc = intel_get_crtc_for_plane(dev, plane);
 -      if (crtc->fb == NULL || !crtc->enabled) {
 -              *sprite_wm = display->guard_size;
 -              return false;
 -      }
 +      I915_WRITE(DSPCNTR(plane), dspcntr);
 +      POSTING_READ(DSPCNTR(plane));
 +      intel_enable_plane(dev_priv, plane, pipe);
  
 -      clock = crtc->mode.clock;
 +      ret = intel_pipe_set_base(crtc, x, y, old_fb);
  
 -      /* Use the small buffer method to calculate the sprite watermark */
 -      entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
 -      tlb_miss = display->fifo_size*display->cacheline_size -
 -              sprite_width * 8;
 -      if (tlb_miss > 0)
 -              entries += tlb_miss;
 -      entries = DIV_ROUND_UP(entries, display->cacheline_size);
 -      *sprite_wm = entries + display->guard_size;
 -      if (*sprite_wm > (int)display->max_wm)
 -              *sprite_wm = display->max_wm;
 +      intel_update_watermarks(dev);
  
 -      return true;
 +      return ret;
  }
  
 -static bool
 -sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
 -                              uint32_t sprite_width, int pixel_size,
 -                              const struct intel_watermark_params *display,
 -                              int latency_ns, int *sprite_wm)
 +/*
 + * Initialize reference clocks when the driver loads
 + */
 +void ironlake_init_pch_refclk(struct drm_device *dev)
  {
 -      struct drm_crtc *crtc;
 -      unsigned long line_time_us;
 -      int clock;
 -      int line_count, line_size;
 -      int small, large;
 -      int entries;
 -
 -      if (!latency_ns) {
 -              *sprite_wm = 0;
 -              return false;
 -      }
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_mode_config *mode_config = &dev->mode_config;
 +      struct intel_encoder *encoder;
 +      u32 temp;
 +      bool has_lvds = false;
 +      bool has_cpu_edp = false;
 +      bool has_pch_edp = false;
 +      bool has_panel = false;
 +      bool has_ck505 = false;
 +      bool can_ssc = false;
  
 -      crtc = intel_get_crtc_for_plane(dev, plane);
 -      clock = crtc->mode.clock;
 -      if (!clock) {
 -              *sprite_wm = 0;
 -              return false;
 +      /* We need to take the global config into account */
 +      list_for_each_entry(encoder, &mode_config->encoder_list,
 +                          base.head) {
 +              switch (encoder->type) {
 +              case INTEL_OUTPUT_LVDS:
 +                      has_panel = true;
 +                      has_lvds = true;
 +                      break;
 +              case INTEL_OUTPUT_EDP:
 +                      has_panel = true;
 +                      if (intel_encoder_is_pch_edp(&encoder->base))
 +                              has_pch_edp = true;
 +                      else
 +                              has_cpu_edp = true;
 +                      break;
 +              }
        }
  
 -      line_time_us = (sprite_width * 1000) / clock;
 -      if (!line_time_us) {
 -              *sprite_wm = 0;
 -              return false;
 +      if (HAS_PCH_IBX(dev)) {
 +              has_ck505 = dev_priv->display_clock_mode;
 +              can_ssc = has_ck505;
 +      } else {
 +              has_ck505 = false;
 +              can_ssc = true;
        }
  
 -      line_count = (latency_ns / line_time_us + 1000) / 1000;
 -      line_size = sprite_width * pixel_size;
 +      DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
 +                    has_panel, has_lvds, has_pch_edp, has_cpu_edp,
 +                    has_ck505);
  
 -      /* Use the minimum of the small and large buffer method for primary */
 -      small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
 -      large = line_count * line_size;
 +      /* Ironlake: try to setup display ref clock before DPLL
 +       * enabling. This is only under driver's control after
 +       * PCH B stepping, previous chipset stepping should be
 +       * ignoring this setting.
 +       */
 +      temp = I915_READ(PCH_DREF_CONTROL);
 +      /* Always enable nonspread source */
 +      temp &= ~DREF_NONSPREAD_SOURCE_MASK;
  
 -      entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
 -      *sprite_wm = entries + display->guard_size;
 +      if (has_ck505)
 +              temp |= DREF_NONSPREAD_CK505_ENABLE;
 +      else
 +              temp |= DREF_NONSPREAD_SOURCE_ENABLE;
  
 -      return *sprite_wm > 0x3ff ? false : true;
 -}
 +      if (has_panel) {
 +              temp &= ~DREF_SSC_SOURCE_MASK;
 +              temp |= DREF_SSC_SOURCE_ENABLE;
  
 -static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
 -                                       uint32_t sprite_width, int pixel_size)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
 -      u32 val;
 -      int sprite_wm, reg;
 -      int ret;
 +              /* SSC must be turned on before enabling the CPU output  */
 +              if (intel_panel_use_ssc(dev_priv) && can_ssc) {
 +                      DRM_DEBUG_KMS("Using SSC on panel\n");
 +                      temp |= DREF_SSC1_ENABLE;
 +              } else
 +                      temp &= ~DREF_SSC1_ENABLE;
  
 -      switch (pipe) {
 -      case 0:
 -              reg = WM0_PIPEA_ILK;
 -              break;
 -      case 1:
 -              reg = WM0_PIPEB_ILK;
 -              break;
 -      case 2:
 -              reg = WM0_PIPEC_IVB;
 -              break;
 -      default:
 -              return; /* bad pipe */
 -      }
 +              /* Get SSC going before enabling the outputs */
 +              I915_WRITE(PCH_DREF_CONTROL, temp);
 +              POSTING_READ(PCH_DREF_CONTROL);
 +              udelay(200);
  
 -      ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
 -                                          &sandybridge_display_wm_info,
 -                                          latency, &sprite_wm);
 -      if (!ret) {
 -              DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
 -                            pipe);
 -              return;
 -      }
 +              temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
  
 -      val = I915_READ(reg);
 -      val &= ~WM0_PIPE_SPRITE_MASK;
 -      I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
 -      DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
 -
 -
 -      ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
 -                                            pixel_size,
 -                                            &sandybridge_display_srwm_info,
 -                                            SNB_READ_WM1_LATENCY() * 500,
 -                                            &sprite_wm);
 -      if (!ret) {
 -              DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
 -                            pipe);
 -              return;
 -      }
 -      I915_WRITE(WM1S_LP_ILK, sprite_wm);
 +              /* Enable CPU source on CPU attached eDP */
 +              if (has_cpu_edp) {
 +                      if (intel_panel_use_ssc(dev_priv) && can_ssc) {
 +                              DRM_DEBUG_KMS("Using SSC on eDP\n");
 +                              temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
 +                      }
 +                      else
 +                              temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
 +              } else
 +                      temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
  
 -      /* Only IVB has two more LP watermarks for sprite */
 -      if (!IS_IVYBRIDGE(dev))
 -              return;
 +              I915_WRITE(PCH_DREF_CONTROL, temp);
 +              POSTING_READ(PCH_DREF_CONTROL);
 +              udelay(200);
 +      } else {
 +              DRM_DEBUG_KMS("Disabling SSC entirely\n");
  
 -      ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
 -                                            pixel_size,
 -                                            &sandybridge_display_srwm_info,
 -                                            SNB_READ_WM2_LATENCY() * 500,
 -                                            &sprite_wm);
 -      if (!ret) {
 -              DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
 -                            pipe);
 -              return;
 -      }
 -      I915_WRITE(WM2S_LP_IVB, sprite_wm);
 +              temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
  
 -      ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
 -                                            pixel_size,
 -                                            &sandybridge_display_srwm_info,
 -                                            SNB_READ_WM3_LATENCY() * 500,
 -                                            &sprite_wm);
 -      if (!ret) {
 -              DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
 -                            pipe);
 -              return;
 -      }
 -      I915_WRITE(WM3S_LP_IVB, sprite_wm);
 -}
 +              /* Turn off CPU output */
 +              temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
  
 -/**
 - * intel_update_watermarks - update FIFO watermark values based on current modes
 - *
 - * Calculate watermark values for the various WM regs based on current mode
 - * and plane configuration.
 - *
 - * There are several cases to deal with here:
 - *   - normal (i.e. non-self-refresh)
 - *   - self-refresh (SR) mode
 - *   - lines are large relative to FIFO size (buffer can hold up to 2)
 - *   - lines are small relative to FIFO size (buffer can hold more than 2
 - *     lines), so need to account for TLB latency
 - *
 - *   The normal calculation is:
 - *     watermark = dotclock * bytes per pixel * latency
 - *   where latency is platform & configuration dependent (we assume pessimal
 - *   values here).
 - *
 - *   The SR calculation is:
 - *     watermark = (trunc(latency/line time)+1) * surface width *
 - *       bytes per pixel
 - *   where
 - *     line time = htotal / dotclock
 - *     surface width = hdisplay for normal plane and 64 for cursor
 - *   and latency is assumed to be high, as above.
 - *
 - * The final value programmed to the register should always be rounded up,
 - * and include an extra 2 entries to account for clock crossings.
 - *
 - * We don't use the sprite, so we can ignore that.  And on Crestline we have
 - * to set the non-SR watermarks to 8.
 - */
 -static void intel_update_watermarks(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +              I915_WRITE(PCH_DREF_CONTROL, temp);
 +              POSTING_READ(PCH_DREF_CONTROL);
 +              udelay(200);
  
 -      if (dev_priv->display.update_wm)
 -              dev_priv->display.update_wm(dev);
 -}
 +              /* Turn off the SSC source */
 +              temp &= ~DREF_SSC_SOURCE_MASK;
 +              temp |= DREF_SSC_SOURCE_DISABLE;
  
 -void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
 -                                  uint32_t sprite_width, int pixel_size)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +              /* Turn off SSC1 */
 +              temp &= ~ DREF_SSC1_ENABLE;
  
 -      if (dev_priv->display.update_sprite_wm)
 -              dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
 -                                                 pixel_size);
 +              I915_WRITE(PCH_DREF_CONTROL, temp);
 +              POSTING_READ(PCH_DREF_CONTROL);
 +              udelay(200);
 +      }
  }
  
 -static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 -{
 -      if (i915_panel_use_ssc >= 0)
 -              return i915_panel_use_ssc != 0;
 -      return dev_priv->lvds_use_ssc
 -              && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 -}
 -
 -/**
 - * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 - * @crtc: CRTC structure
 - * @mode: requested mode
 - *
 - * A pipe may be connected to one or more outputs.  Based on the depth of the
 - * attached framebuffer, choose a good color depth to use on the pipe.
 - *
 - * If possible, match the pipe depth to the fb depth.  In some cases, this
 - * isn't ideal, because the connected output supports a lesser or restricted
 - * set of depths.  Resolve that here:
 - *    LVDS typically supports only 6bpc, so clamp down in that case
 - *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 - *    Displays may support a restricted set as well, check EDID and clamp as
 - *      appropriate.
 - *    DP may want to dither down to 6bpc to fit larger modes
 - *
 - * RETURNS:
 - * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 - * true if they don't match).
 - */
 -static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 -                                       unsigned int *pipe_bpp,
 -                                       struct drm_display_mode *mode)
 +static int ironlake_get_refclk(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_encoder *encoder;
 -      struct drm_connector *connector;
 -      unsigned int display_bpc = UINT_MAX, bpc;
 -
 -      /* Walk the encoders & connectors on this crtc, get min bpc */
 -      list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 -              struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
 -
 -              if (encoder->crtc != crtc)
 -                      continue;
 -
 -              if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
 -                      unsigned int lvds_bpc;
 -
 -                      if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
 -                          LVDS_A3_POWER_UP)
 -                              lvds_bpc = 8;
 -                      else
 -                              lvds_bpc = 6;
 -
 -                      if (lvds_bpc < display_bpc) {
 -                              DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
 -                              display_bpc = lvds_bpc;
 -                      }
 -                      continue;
 -              }
 -
 -              if (intel_encoder->type == INTEL_OUTPUT_EDP) {
 -                      /* Use VBT settings if we have an eDP panel */
 -                      unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 +      struct intel_encoder *encoder;
 +      struct drm_mode_config *mode_config = &dev->mode_config;
 +      struct intel_encoder *edp_encoder = NULL;
 +      int num_connectors = 0;
 +      bool is_lvds = false;
  
 -                      if (edp_bpc < display_bpc) {
 -                              DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
 -                              display_bpc = edp_bpc;
 -                      }
 +      list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
 +              if (encoder->base.crtc != crtc)
                        continue;
 -              }
 -
 -              /* Not one of the known troublemakers, check the EDID */
 -              list_for_each_entry(connector, &dev->mode_config.connector_list,
 -                                  head) {
 -                      if (connector->encoder != encoder)
 -                              continue;
 -
 -                      /* Don't use an invalid EDID bpc value */
 -                      if (connector->display_info.bpc &&
 -                          connector->display_info.bpc < display_bpc) {
 -                              DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
 -                              display_bpc = connector->display_info.bpc;
 -                      }
 -              }
  
 -              /*
 -               * HDMI is either 12 or 8, so if the display lets 10bpc sneak
 -               * through, clamp it down.  (Note: >12bpc will be caught below.)
 -               */
 -              if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
 -                      if (display_bpc > 8 && display_bpc < 12) {
 -                              DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
 -                              display_bpc = 12;
 -                      } else {
 -                              DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
 -                              display_bpc = 8;
 -                      }
 +              switch (encoder->type) {
 +              case INTEL_OUTPUT_LVDS:
 +                      is_lvds = true;
 +                      break;
 +              case INTEL_OUTPUT_EDP:
 +                      edp_encoder = encoder;
 +                      break;
                }
 +              num_connectors++;
        }
  
 -      if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
 -              DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
 -              display_bpc = 6;
 -      }
 -
 -      /*
 -       * We could just drive the pipe at the highest bpc all the time and
 -       * enable dithering as needed, but that costs bandwidth.  So choose
 -       * the minimum value that expresses the full color range of the fb but
 -       * also stays within the max display bpc discovered above.
 -       */
 -
 -      switch (crtc->fb->depth) {
 -      case 8:
 -              bpc = 8; /* since we go through a colormap */
 -              break;
 -      case 15:
 -      case 16:
 -              bpc = 6; /* min is 18bpp */
 -              break;
 -      case 24:
 -              bpc = 8;
 -              break;
 -      case 30:
 -              bpc = 10;
 -              break;
 -      case 48:
 -              bpc = 12;
 -              break;
 -      default:
 -              DRM_DEBUG("unsupported depth, assuming 24 bits\n");
 -              bpc = min((unsigned int)8, display_bpc);
 -              break;
 -      }
 -
 -      display_bpc = min(display_bpc, bpc);
 -
 -      DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
 -                    bpc, display_bpc);
 -
 -      *pipe_bpp = display_bpc * 3;
 -
 -      return display_bpc != bpc;
 -}
 -
 -static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      int refclk;
 -
 -      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 -          intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 -              refclk = dev_priv->lvds_ssc_freq * 1000;
 +      if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
                DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
 -                            refclk / 1000);
 -      } else if (!IS_GEN2(dev)) {
 -              refclk = 96000;
 -      } else {
 -              refclk = 48000;
 -      }
 -
 -      return refclk;
 -}
 -
 -static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
 -                                    intel_clock_t *clock)
 -{
 -      /* SDVO TV has fixed PLL values depend on its clock range,
 -         this mirrors vbios setting. */
 -      if (adjusted_mode->clock >= 100000
 -          && adjusted_mode->clock < 140500) {
 -              clock->p1 = 2;
 -              clock->p2 = 10;
 -              clock->n = 3;
 -              clock->m1 = 16;
 -              clock->m2 = 8;
 -      } else if (adjusted_mode->clock >= 140500
 -                 && adjusted_mode->clock <= 200000) {
 -              clock->p1 = 1;
 -              clock->p2 = 10;
 -              clock->n = 6;
 -              clock->m1 = 12;
 -              clock->m2 = 8;
 -      }
 -}
 -
 -static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
 -                                   intel_clock_t *clock,
 -                                   intel_clock_t *reduced_clock)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int pipe = intel_crtc->pipe;
 -      u32 fp, fp2 = 0;
 -
 -      if (IS_PINEVIEW(dev)) {
 -              fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
 -              if (reduced_clock)
 -                      fp2 = (1 << reduced_clock->n) << 16 |
 -                              reduced_clock->m1 << 8 | reduced_clock->m2;
 -      } else {
 -              fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
 -              if (reduced_clock)
 -                      fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
 -                              reduced_clock->m2;
 +                            dev_priv->lvds_ssc_freq);
 +              return dev_priv->lvds_ssc_freq * 1000;
        }
  
 -      I915_WRITE(FP0(pipe), fp);
 -
 -      intel_crtc->lowfreq_avail = false;
 -      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 -          reduced_clock && i915_powersave) {
 -              I915_WRITE(FP1(pipe), fp2);
 -              intel_crtc->lowfreq_avail = true;
 -      } else {
 -              I915_WRITE(FP1(pipe), fp);
 -      }
 +      return 120000;
  }
  
 -static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 -                            struct drm_display_mode *mode,
 -                            struct drm_display_mode *adjusted_mode,
 -                            int x, int y,
 -                            struct drm_framebuffer *old_fb)
 +static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 +                                struct drm_display_mode *mode,
 +                                struct drm_display_mode *adjusted_mode,
 +                                int x, int y,
 +                                struct drm_framebuffer *old_fb)
  {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int plane = intel_crtc->plane;
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
 -      u32 dpll, dspcntr, pipeconf, vsyncshift;
 -      bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
 +      u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
 +      bool ok, has_reduced_clock = false, is_sdvo = false;
        bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
        struct drm_mode_config *mode_config = &dev->mode_config;
 -      struct intel_encoder *encoder;
 +      struct intel_encoder *encoder, *edp_encoder = NULL;
        const intel_limit_t *limit;
        int ret;
 +      struct fdi_m_n m_n = {0};
        u32 temp;
 -      u32 lvds_sync = 0;
 +      int target_clock, pixel_multiplier, lane, link_bw, factor;
 +      unsigned int pipe_bpp;
 +      bool dither;
 +      bool is_cpu_edp = false, is_pch_edp = false;
  
        list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
                if (encoder->base.crtc != crtc)
                        if (encoder->needs_tv_clock)
                                is_tv = true;
                        break;
 -              case INTEL_OUTPUT_DVO:
 -                      is_dvo = true;
 -                      break;
                case INTEL_OUTPUT_TVOUT:
                        is_tv = true;
                        break;
                case INTEL_OUTPUT_DISPLAYPORT:
                        is_dp = true;
                        break;
 +              case INTEL_OUTPUT_EDP:
 +                      is_dp = true;
 +                      if (intel_encoder_is_pch_edp(&encoder->base))
 +                              is_pch_edp = true;
 +                      else
 +                              is_cpu_edp = true;
 +                      edp_encoder = encoder;
 +                      break;
                }
  
                num_connectors++;
        }
  
 -      refclk = i9xx_get_refclk(crtc, num_connectors);
 +      refclk = ironlake_get_refclk(crtc);
  
        /*
         * Returns a set of divisors for the desired target clock with the given
                                                    &clock,
                                                    &reduced_clock);
        }
 -
 -      if (is_sdvo && is_tv)
 -              i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
 -
 -      i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
 -                               &reduced_clock : NULL);
 -
 -      dpll = DPLL_VGA_MODE_DIS;
 -
 -      if (!IS_GEN2(dev)) {
 -              if (is_lvds)
 -                      dpll |= DPLLB_MODE_LVDS;
 -              else
 -                      dpll |= DPLLB_MODE_DAC_SERIAL;
 -              if (is_sdvo) {
 -                      int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 -                      if (pixel_multiplier > 1) {
 -                              if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 -                                      dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
 -                      }
 -                      dpll |= DPLL_DVO_HIGH_SPEED;
 +      /* SDVO TV has fixed PLL values depend on its clock range,
 +         this mirrors vbios setting. */
 +      if (is_sdvo && is_tv) {
 +              if (adjusted_mode->clock >= 100000
 +                  && adjusted_mode->clock < 140500) {
 +                      clock.p1 = 2;
 +                      clock.p2 = 10;
 +                      clock.n = 3;
 +                      clock.m1 = 16;
 +                      clock.m2 = 8;
 +              } else if (adjusted_mode->clock >= 140500
 +                         && adjusted_mode->clock <= 200000) {
 +                      clock.p1 = 1;
 +                      clock.p2 = 10;
 +                      clock.n = 6;
 +                      clock.m1 = 12;
 +                      clock.m2 = 8;
                }
 -              if (is_dp)
 -                      dpll |= DPLL_DVO_HIGH_SPEED;
 +      }
  
 -              /* compute bitmask from p1 value */
 -              if (IS_PINEVIEW(dev))
 -                      dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
 -              else {
 -                      dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 -                      if (IS_G4X(dev) && has_reduced_clock)
 -                              dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 -              }
 -              switch (clock.p2) {
 -              case 5:
 -                      dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
 -                      break;
 -              case 7:
 -                      dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
 -                      break;
 -              case 10:
 -                      dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
 -                      break;
 -              case 14:
 -                      dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
 -                      break;
 -              }
 -              if (INTEL_INFO(dev)->gen >= 4)
 -                      dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 +      /* FDI link */
 +      pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 +      lane = 0;
 +      /* CPU eDP doesn't require FDI link, so just set DP M/N
 +         according to current link config */
 +      if (is_cpu_edp) {
 +              target_clock = mode->clock;
 +              intel_edp_link_config(edp_encoder, &lane, &link_bw);
        } else {
 -              if (is_lvds) {
 -                      dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 -              } else {
 -                      if (clock.p1 == 2)
 -                              dpll |= PLL_P1_DIVIDE_BY_TWO;
 -                      else
 -                              dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 -                      if (clock.p2 == 4)
 -                              dpll |= PLL_P2_DIVIDE_BY_4;
 +              /* [e]DP over FDI requires target mode clock
 +                 instead of link clock */
 +              if (is_dp)
 +                      target_clock = mode->clock;
 +              else
 +                      target_clock = adjusted_mode->clock;
 +
 +              /* FDI is a binary signal running at ~2.7GHz, encoding
 +               * each output octet as 10 bits. The actual frequency
 +               * is stored as a divider into a 100MHz clock, and the
 +               * mode pixel clock is stored in units of 1KHz.
 +               * Hence the bw of each lane in terms of the mode signal
 +               * is:
 +               */
 +              link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
 +      }
 +
 +      /* determine panel color depth */
 +      temp = I915_READ(PIPECONF(pipe));
 +      temp &= ~PIPE_BPC_MASK;
 +      dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
 +      switch (pipe_bpp) {
 +      case 18:
 +              temp |= PIPE_6BPC;
 +              break;
 +      case 24:
 +              temp |= PIPE_8BPC;
 +              break;
 +      case 30:
 +              temp |= PIPE_10BPC;
 +              break;
 +      case 36:
 +              temp |= PIPE_12BPC;
 +              break;
 +      default:
 +              WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
 +                      pipe_bpp);
 +              temp |= PIPE_8BPC;
 +              pipe_bpp = 24;
 +              break;
 +      }
 +
 +      intel_crtc->bpp = pipe_bpp;
 +      I915_WRITE(PIPECONF(pipe), temp);
 +
 +      if (!lane) {
 +              /*
 +               * Account for spread spectrum to avoid
 +               * oversubscribing the link. Max center spread
 +               * is 2.5%; use 5% for safety's sake.
 +               */
 +              u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
 +              lane = bps / (link_bw * 8) + 1;
 +      }
 +
 +      intel_crtc->fdi_lanes = lane;
 +
 +      if (pixel_multiplier > 1)
 +              link_bw *= pixel_multiplier;
 +      ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
 +                           &m_n);
 +
 +      fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
 +      if (has_reduced_clock)
 +              fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
 +                      reduced_clock.m2;
 +
 +      /* Enable autotuning of the PLL clock (if permissible) */
 +      factor = 21;
 +      if (is_lvds) {
 +              if ((intel_panel_use_ssc(dev_priv) &&
 +                   dev_priv->lvds_ssc_freq == 100) ||
 +                  (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
 +                      factor = 25;
 +      } else if (is_sdvo && is_tv)
 +              factor = 20;
 +
 +      if (clock.m < factor * clock.n)
 +              fp |= FP_CB_TUNE;
 +
 +      dpll = 0;
 +
 +      if (is_lvds)
 +              dpll |= DPLLB_MODE_LVDS;
 +      else
 +              dpll |= DPLLB_MODE_DAC_SERIAL;
 +      if (is_sdvo) {
 +              int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 +              if (pixel_multiplier > 1) {
 +                      dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
                }
 +              dpll |= DPLL_DVO_HIGH_SPEED;
 +      }
 +      if (is_dp && !is_cpu_edp)
 +              dpll |= DPLL_DVO_HIGH_SPEED;
 +
 +      /* compute bitmask from p1 value */
 +      dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 +      /* also FPA1 */
 +      dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 +
 +      switch (clock.p2) {
 +      case 5:
 +              dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
 +              break;
 +      case 7:
 +              dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
 +              break;
 +      case 10:
 +              dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
 +              break;
 +      case 14:
 +              dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
 +              break;
        }
  
        if (is_sdvo && is_tv)
        /* Set up the display plane register */
        dspcntr = DISPPLANE_GAMMA_ENABLE;
  
 -      if (pipe == 0)
 -              dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
 -      else
 -              dspcntr |= DISPPLANE_SEL_PIPE_B;
 +      DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
 +      drm_mode_debug_printmodeline(mode);
  
 -      if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
 -              /* Enable pixel doubling when the dot clock is > 90% of the (display)
 -               * core speed.
 -               *
 -               * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
 -               * pipe == 0 check?
 -               */
 -              if (mode->clock >
 -                  dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
 -                      pipeconf |= PIPECONF_DOUBLE_WIDE;
 -              else
 -                      pipeconf &= ~PIPECONF_DOUBLE_WIDE;
 -      }
 +      /* PCH eDP needs FDI, but CPU eDP does not */
 +      if (!intel_crtc->no_pll) {
 +              if (!is_cpu_edp) {
 +                      I915_WRITE(PCH_FP0(pipe), fp);
 +                      I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
  
 -      /* default to 8bpc */
 -      pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
 -      if (is_dp) {
 -              if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
 -                      pipeconf |= PIPECONF_BPP_6 |
 -                                  PIPECONF_DITHER_EN |
 -                                  PIPECONF_DITHER_TYPE_SP;
 +                      POSTING_READ(PCH_DPLL(pipe));
 +                      udelay(150);
 +              }
 +      } else {
 +              if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
 +                  fp == I915_READ(PCH_FP0(0))) {
 +                      intel_crtc->use_pll_a = true;
 +                      DRM_DEBUG_KMS("using pipe a dpll\n");
 +              } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
 +                         fp == I915_READ(PCH_FP0(1))) {
 +                      intel_crtc->use_pll_a = false;
 +                      DRM_DEBUG_KMS("using pipe b dpll\n");
 +              } else {
 +                      DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
 +                      return -EINVAL;
                }
        }
  
 -      dpll |= DPLL_VCO_ENABLE;
 -
 -      DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
 -      drm_mode_debug_printmodeline(mode);
 -
 -      I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
 -
 -      POSTING_READ(DPLL(pipe));
 -      udelay(150);
 -
        /* The LVDS pin pair needs to be on before the DPLLs are enabled.
         * This is an exception to the general rule that mode_set doesn't turn
         * things on.
         */
        if (is_lvds) {
 -              temp = I915_READ(LVDS);
 +              temp = I915_READ(PCH_LVDS);
                temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 -              if (pipe == 1) {
 -                      temp |= LVDS_PIPEB_SELECT;
 +              if (HAS_PCH_CPT(dev)) {
 +                      temp &= ~PORT_TRANS_SEL_MASK;
 +                      temp |= PORT_TRANS_SEL_CPT(pipe);
                } else {
 -                      temp &= ~LVDS_PIPEB_SELECT;
 +                      if (pipe == 1)
 +                              temp |= LVDS_PIPEB_SELECT;
 +                      else
 +                              temp &= ~LVDS_PIPEB_SELECT;
                }
 +
                /* set the corresponsding LVDS_BORDER bit */
                temp |= dev_priv->lvds_border_bits;
                /* Set the B0-B3 data pairs corresponding to whether we're going to
                 * appropriately here, but we need to look more thoroughly into how
                 * panels behave in the two modes.
                 */
 -              /* set the dithering flag on LVDS as needed */
 -              if (INTEL_INFO(dev)->gen >= 4) {
 -                      if (dev_priv->lvds_dither)
 -                              temp |= LVDS_ENABLE_DITHER;
 -                      else
 -                              temp &= ~LVDS_ENABLE_DITHER;
 -              }
 +              temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
                if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
 -                      lvds_sync |= LVDS_HSYNC_POLARITY;
 +                      temp |= LVDS_HSYNC_POLARITY;
                if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
 -                      lvds_sync |= LVDS_VSYNC_POLARITY;
 -              if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
 -                  != lvds_sync) {
 -                      char flags[2] = "-+";
 -                      DRM_INFO("Changing LVDS panel from "
 -                               "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
 -                               flags[!(temp & LVDS_HSYNC_POLARITY)],
 -                               flags[!(temp & LVDS_VSYNC_POLARITY)],
 -                               flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
 -                               flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
 -                      temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
 -                      temp |= lvds_sync;
 -              }
 -              I915_WRITE(LVDS, temp);
 +                      temp |= LVDS_VSYNC_POLARITY;
 +              I915_WRITE(PCH_LVDS, temp);
        }
  
 -      if (is_dp) {
 +      pipeconf &= ~PIPECONF_DITHER_EN;
 +      pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
 +      if ((is_lvds && dev_priv->lvds_dither) || dither) {
 +              pipeconf |= PIPECONF_DITHER_EN;
 +              pipeconf |= PIPECONF_DITHER_TYPE_SP;
 +      }
 +      if (is_dp && !is_cpu_edp) {
                intel_dp_set_m_n(crtc, mode, adjusted_mode);
 +      } else {
 +              /* For non-DP output, clear any trans DP clock recovery setting.*/
 +              I915_WRITE(TRANSDATA_M1(pipe), 0);
 +              I915_WRITE(TRANSDATA_N1(pipe), 0);
 +              I915_WRITE(TRANSDPLINK_M1(pipe), 0);
 +              I915_WRITE(TRANSDPLINK_N1(pipe), 0);
        }
  
 -      I915_WRITE(DPLL(pipe), dpll);
 +      if (!intel_crtc->no_pll && (!edp_encoder || is_pch_edp)) {
 +              I915_WRITE(PCH_DPLL(pipe), dpll);
  
 -      /* Wait for the clocks to stabilize. */
 -      POSTING_READ(DPLL(pipe));
 -      udelay(150);
 +              /* Wait for the clocks to stabilize. */
 +              POSTING_READ(PCH_DPLL(pipe));
 +              udelay(150);
  
 -      if (INTEL_INFO(dev)->gen >= 4) {
 -              temp = 0;
 -              if (is_sdvo) {
 -                      temp = intel_mode_get_pixel_multiplier(adjusted_mode);
 -                      if (temp > 1)
 -                              temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 -                      else
 -                              temp = 0;
 -              }
 -              I915_WRITE(DPLL_MD(pipe), temp);
 -      } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
 -              I915_WRITE(DPLL(pipe), dpll);
 +              I915_WRITE(PCH_DPLL(pipe), dpll);
        }
  
 -      if (HAS_PIPE_CXSR(dev)) {
 -              if (intel_crtc->lowfreq_avail) {
 -                      DRM_DEBUG_KMS("enabling CxSR downclocking\n");
 -                      pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
 +      intel_crtc->lowfreq_avail = false;
 +      if (!intel_crtc->no_pll) {
 +              if (is_lvds && has_reduced_clock && i915_powersave) {
 +                      I915_WRITE(PCH_FP1(pipe), fp2);
 +                      intel_crtc->lowfreq_avail = true;
 +                      if (HAS_PIPE_CXSR(dev)) {
 +                              DRM_DEBUG_KMS("enabling CxSR downclocking\n");
 +                              pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
 +                      }
                } else {
 -                      DRM_DEBUG_KMS("disabling CxSR downclocking\n");
 -                      pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
 +                      I915_WRITE(PCH_FP1(pipe), fp);
 +                      if (HAS_PIPE_CXSR(dev)) {
 +                              DRM_DEBUG_KMS("disabling CxSR downclocking\n");
 +                              pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
 +                      }
                }
        }
  
        pipeconf &= ~PIPECONF_INTERLACE_MASK;
 -      if (!IS_GEN2(dev) &&
 -          adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 -              pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
 +      if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 +              pipeconf |= PIPECONF_INTERLACED_ILK;
                /* the chip adds 2 halflines automatically */
                adjusted_mode->crtc_vtotal -= 1;
                adjusted_mode->crtc_vblank_end -= 1;
 -              vsyncshift = adjusted_mode->crtc_hsync_start
 -                           - adjusted_mode->crtc_htotal/2;
 +              I915_WRITE(VSYNCSHIFT(pipe),
 +                         adjusted_mode->crtc_hsync_start
 +                         - adjusted_mode->crtc_htotal/2);
        } else {
                pipeconf |= PIPECONF_PROGRESSIVE;
 -              vsyncshift = 0;
 +              I915_WRITE(VSYNCSHIFT(pipe), 0);
        }
  
 -      if (!IS_GEN3(dev))
 -              I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
 -
        I915_WRITE(HTOTAL(pipe),
                   (adjusted_mode->crtc_hdisplay - 1) |
                   ((adjusted_mode->crtc_htotal - 1) << 16));
                   (adjusted_mode->crtc_vsync_start - 1) |
                   ((adjusted_mode->crtc_vsync_end - 1) << 16));
  
 -      /* pipesrc and dspsize control the size that is scaled from,
 -       * which should always be the user's requested size.
 +      /* pipesrc controls the size that is scaled from, which should
 +       * always be the user's requested size.
         */
 -      I915_WRITE(DSPSIZE(plane),
 -                 ((mode->vdisplay - 1) << 16) |
 -                 (mode->hdisplay - 1));
 -      I915_WRITE(DSPPOS(plane), 0);
        I915_WRITE(PIPESRC(pipe),
                   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
  
 +      I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
 +      I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
 +      I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
 +      I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 +
 +      if (is_cpu_edp)
 +              ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 +
        I915_WRITE(PIPECONF(pipe), pipeconf);
        POSTING_READ(PIPECONF(pipe));
 -      intel_enable_pipe(dev_priv, pipe, false);
  
        intel_wait_for_vblank(dev, pipe);
  
        I915_WRITE(DSPCNTR(plane), dspcntr);
        POSTING_READ(DSPCNTR(plane));
 -      intel_enable_plane(dev_priv, plane, pipe);
  
        ret = intel_pipe_set_base(crtc, x, y, old_fb);
  
        return ret;
  }
  
 -/*
 - * Initialize reference clocks when the driver loads
 - */
 -void ironlake_init_pch_refclk(struct drm_device *dev)
 +static int intel_crtc_mode_set(struct drm_crtc *crtc,
 +                             struct drm_display_mode *mode,
 +                             struct drm_display_mode *adjusted_mode,
 +                             int x, int y,
 +                             struct drm_framebuffer *old_fb)
  {
 +      struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_mode_config *mode_config = &dev->mode_config;
 -      struct intel_encoder *encoder;
 -      u32 temp;
 -      bool has_lvds = false;
 -      bool has_cpu_edp = false;
 -      bool has_pch_edp = false;
 -      bool has_panel = false;
 -      bool has_ck505 = false;
 -      bool can_ssc = false;
 -
 -      /* We need to take the global config into account */
 -      list_for_each_entry(encoder, &mode_config->encoder_list,
 -                          base.head) {
 -              switch (encoder->type) {
 -              case INTEL_OUTPUT_LVDS:
 -                      has_panel = true;
 -                      has_lvds = true;
 -                      break;
 -              case INTEL_OUTPUT_EDP:
 -                      has_panel = true;
 -                      if (intel_encoder_is_pch_edp(&encoder->base))
 -                              has_pch_edp = true;
 -                      else
 -                              has_cpu_edp = true;
 -                      break;
 -              }
 -      }
 -
 -      if (HAS_PCH_IBX(dev)) {
 -              has_ck505 = dev_priv->display_clock_mode;
 -              can_ssc = has_ck505;
 -      } else {
 -              has_ck505 = false;
 -              can_ssc = true;
 -      }
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      int ret;
  
 -      DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
 -                    has_panel, has_lvds, has_pch_edp, has_cpu_edp,
 -                    has_ck505);
 +      drm_vblank_pre_modeset(dev, pipe);
  
 -      /* Ironlake: try to setup display ref clock before DPLL
 -       * enabling. This is only under driver's control after
 -       * PCH B stepping, previous chipset stepping should be
 -       * ignoring this setting.
 -       */
 -      temp = I915_READ(PCH_DREF_CONTROL);
 -      /* Always enable nonspread source */
 -      temp &= ~DREF_NONSPREAD_SOURCE_MASK;
 +      ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
 +                                            x, y, old_fb);
 +      drm_vblank_post_modeset(dev, pipe);
  
 -      if (has_ck505)
 -              temp |= DREF_NONSPREAD_CK505_ENABLE;
 +      if (ret)
 +              intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
        else
 -              temp |= DREF_NONSPREAD_SOURCE_ENABLE;
 -
 -      if (has_panel) {
 -              temp &= ~DREF_SSC_SOURCE_MASK;
 -              temp |= DREF_SSC_SOURCE_ENABLE;
 +              intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
  
 -              /* SSC must be turned on before enabling the CPU output  */
 -              if (intel_panel_use_ssc(dev_priv) && can_ssc) {
 -                      DRM_DEBUG_KMS("Using SSC on panel\n");
 -                      temp |= DREF_SSC1_ENABLE;
 -              } else
 -                      temp &= ~DREF_SSC1_ENABLE;
 +      return ret;
 +}
  
 -              /* Get SSC going before enabling the outputs */
 -              I915_WRITE(PCH_DREF_CONTROL, temp);
 -              POSTING_READ(PCH_DREF_CONTROL);
 -              udelay(200);
 +static bool intel_eld_uptodate(struct drm_connector *connector,
 +                             int reg_eldv, uint32_t bits_eldv,
 +                             int reg_elda, uint32_t bits_elda,
 +                             int reg_edid)
 +{
 +      struct drm_i915_private *dev_priv = connector->dev->dev_private;
 +      uint8_t *eld = connector->eld;
 +      uint32_t i;
  
 -              temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 +      i = I915_READ(reg_eldv);
 +      i &= bits_eldv;
  
 -              /* Enable CPU source on CPU attached eDP */
 -              if (has_cpu_edp) {
 -                      if (intel_panel_use_ssc(dev_priv) && can_ssc) {
 -                              DRM_DEBUG_KMS("Using SSC on eDP\n");
 -                              temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
 -                      }
 -                      else
 -                              temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
 -              } else
 -                      temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
 +      if (!eld[0])
 +              return !i;
  
 -              I915_WRITE(PCH_DREF_CONTROL, temp);
 -              POSTING_READ(PCH_DREF_CONTROL);
 -              udelay(200);
 -      } else {
 -              DRM_DEBUG_KMS("Disabling SSC entirely\n");
 +      if (!i)
 +              return false;
  
 -              temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 +      i = I915_READ(reg_elda);
 +      i &= ~bits_elda;
 +      I915_WRITE(reg_elda, i);
  
 -              /* Turn off CPU output */
 -              temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
 +      for (i = 0; i < eld[2]; i++)
 +              if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
 +                      return false;
  
 -              I915_WRITE(PCH_DREF_CONTROL, temp);
 -              POSTING_READ(PCH_DREF_CONTROL);
 -              udelay(200);
 +      return true;
 +}
  
 -              /* Turn off the SSC source */
 -              temp &= ~DREF_SSC_SOURCE_MASK;
 -              temp |= DREF_SSC_SOURCE_DISABLE;
 +static void g4x_write_eld(struct drm_connector *connector,
 +                        struct drm_crtc *crtc)
 +{
 +      struct drm_i915_private *dev_priv = connector->dev->dev_private;
 +      uint8_t *eld = connector->eld;
 +      uint32_t eldv;
 +      uint32_t len;
 +      uint32_t i;
  
 -              /* Turn off SSC1 */
 -              temp &= ~ DREF_SSC1_ENABLE;
 +      i = I915_READ(G4X_AUD_VID_DID);
  
 -              I915_WRITE(PCH_DREF_CONTROL, temp);
 -              POSTING_READ(PCH_DREF_CONTROL);
 -              udelay(200);
 -      }
 -}
 +      if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
 +              eldv = G4X_ELDV_DEVCL_DEVBLC;
 +      else
 +              eldv = G4X_ELDV_DEVCTG;
  
 -static int ironlake_get_refclk(struct drm_crtc *crtc)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_encoder *encoder;
 -      struct drm_mode_config *mode_config = &dev->mode_config;
 -      struct intel_encoder *edp_encoder = NULL;
 -      int num_connectors = 0;
 -      bool is_lvds = false;
 +      if (intel_eld_uptodate(connector,
 +                             G4X_AUD_CNTL_ST, eldv,
 +                             G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
 +                             G4X_HDMIW_HDMIEDID))
 +              return;
  
 -      list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
 -              if (encoder->base.crtc != crtc)
 -                      continue;
 +      i = I915_READ(G4X_AUD_CNTL_ST);
 +      i &= ~(eldv | G4X_ELD_ADDR);
 +      len = (i >> 9) & 0x1f;          /* ELD buffer size */
 +      I915_WRITE(G4X_AUD_CNTL_ST, i);
  
 -              switch (encoder->type) {
 -              case INTEL_OUTPUT_LVDS:
 -                      is_lvds = true;
 -                      break;
 -              case INTEL_OUTPUT_EDP:
 -                      edp_encoder = encoder;
 -                      break;
 -              }
 -              num_connectors++;
 -      }
 +      if (!eld[0])
 +              return;
  
 -      if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 -              DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
 -                            dev_priv->lvds_ssc_freq);
 -              return dev_priv->lvds_ssc_freq * 1000;
 -      }
 +      len = min_t(uint8_t, eld[2], len);
 +      DRM_DEBUG_DRIVER("ELD size %d\n", len);
 +      for (i = 0; i < len; i++)
 +              I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
  
 -      return 120000;
 +      i = I915_READ(G4X_AUD_CNTL_ST);
 +      i |= eldv;
 +      I915_WRITE(G4X_AUD_CNTL_ST, i);
  }
  
 -static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 -                                struct drm_display_mode *mode,
 -                                struct drm_display_mode *adjusted_mode,
 -                                int x, int y,
 -                                struct drm_framebuffer *old_fb)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int pipe = intel_crtc->pipe;
 -      int plane = intel_crtc->plane;
 -      int refclk, num_connectors = 0;
 -      intel_clock_t clock, reduced_clock;
 -      u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
 -      bool ok, has_reduced_clock = false, is_sdvo = false;
 -      bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
 -      struct intel_encoder *has_edp_encoder = NULL;
 -      struct drm_mode_config *mode_config = &dev->mode_config;
 -      struct intel_encoder *encoder;
 -      const intel_limit_t *limit;
 -      int ret;
 -      struct fdi_m_n m_n = {0};
 -      u32 temp;
 -      u32 lvds_sync = 0;
 -      int target_clock, pixel_multiplier, lane, link_bw, factor;
 -      unsigned int pipe_bpp;
 -      bool dither;
 -
 -      list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
 -              if (encoder->base.crtc != crtc)
 -                      continue;
 -
 -              switch (encoder->type) {
 -              case INTEL_OUTPUT_LVDS:
 -                      is_lvds = true;
 -                      break;
 -              case INTEL_OUTPUT_SDVO:
 -              case INTEL_OUTPUT_HDMI:
 -                      is_sdvo = true;
 -                      if (encoder->needs_tv_clock)
 -                              is_tv = true;
 -                      break;
 -              case INTEL_OUTPUT_TVOUT:
 -                      is_tv = true;
 -                      break;
 -              case INTEL_OUTPUT_ANALOG:
 -                      is_crt = true;
 -                      break;
 -              case INTEL_OUTPUT_DISPLAYPORT:
 -                      is_dp = true;
 -                      break;
 -              case INTEL_OUTPUT_EDP:
 -                      has_edp_encoder = encoder;
 -                      break;
 -              }
 -
 -              num_connectors++;
 -      }
 -
 -      refclk = ironlake_get_refclk(crtc);
 -
 -      /*
 -       * Returns a set of divisors for the desired target clock with the given
 -       * refclk, or FALSE.  The returned values represent the clock equation:
 -       * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 -       */
 -      limit = intel_limit(crtc, refclk);
 -      ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
 -                           &clock);
 -      if (!ok) {
 -              DRM_ERROR("Couldn't find PLL settings for mode!\n");
 -              return -EINVAL;
 -      }
 -
 -      /* Ensure that the cursor is valid for the new mode before changing... */
 -      intel_crtc_update_cursor(crtc, true);
 -
 -      if (is_lvds && dev_priv->lvds_downclock_avail) {
 -              /*
 -               * Ensure we match the reduced clock's P to the target clock.
 -               * If the clocks don't match, we can't switch the display clock
 -               * by using the FP0/FP1. In such case we will disable the LVDS
 -               * downclock feature.
 -              */
 -              has_reduced_clock = limit->find_pll(limit, crtc,
 -                                                  dev_priv->lvds_downclock,
 -                                                  refclk,
 -                                                  &clock,
 -                                                  &reduced_clock);
 -      }
 -      /* SDVO TV has fixed PLL values depend on its clock range,
 -         this mirrors vbios setting. */
 -      if (is_sdvo && is_tv) {
 -              if (adjusted_mode->clock >= 100000
 -                  && adjusted_mode->clock < 140500) {
 -                      clock.p1 = 2;
 -                      clock.p2 = 10;
 -                      clock.n = 3;
 -                      clock.m1 = 16;
 -                      clock.m2 = 8;
 -              } else if (adjusted_mode->clock >= 140500
 -                         && adjusted_mode->clock <= 200000) {
 -                      clock.p1 = 1;
 -                      clock.p2 = 10;
 -                      clock.n = 6;
 -                      clock.m1 = 12;
 -                      clock.m2 = 8;
 -              }
 -      }
 -
 -      /* FDI link */
 -      pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 -      lane = 0;
 -      /* CPU eDP doesn't require FDI link, so just set DP M/N
 -         according to current link config */
 -      if (has_edp_encoder &&
 -          !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 -              target_clock = mode->clock;
 -              intel_edp_link_config(has_edp_encoder,
 -                                    &lane, &link_bw);
 -      } else {
 -              /* [e]DP over FDI requires target mode clock
 -                 instead of link clock */
 -              if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
 -                      target_clock = mode->clock;
 -              else
 -                      target_clock = adjusted_mode->clock;
 -
 -              /* FDI is a binary signal running at ~2.7GHz, encoding
 -               * each output octet as 10 bits. The actual frequency
 -               * is stored as a divider into a 100MHz clock, and the
 -               * mode pixel clock is stored in units of 1KHz.
 -               * Hence the bw of each lane in terms of the mode signal
 -               * is:
 -               */
 -              link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
 -      }
 -
 -      /* determine panel color depth */
 -      temp = I915_READ(PIPECONF(pipe));
 -      temp &= ~PIPE_BPC_MASK;
 -      dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
 -      switch (pipe_bpp) {
 -      case 18:
 -              temp |= PIPE_6BPC;
 -              break;
 -      case 24:
 -              temp |= PIPE_8BPC;
 -              break;
 -      case 30:
 -              temp |= PIPE_10BPC;
 -              break;
 -      case 36:
 -              temp |= PIPE_12BPC;
 -              break;
 -      default:
 -              WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
 -                      pipe_bpp);
 -              temp |= PIPE_8BPC;
 -              pipe_bpp = 24;
 -              break;
 -      }
 -
 -      intel_crtc->bpp = pipe_bpp;
 -      I915_WRITE(PIPECONF(pipe), temp);
 -
 -      if (!lane) {
 -              /*
 -               * Account for spread spectrum to avoid
 -               * oversubscribing the link. Max center spread
 -               * is 2.5%; use 5% for safety's sake.
 -               */
 -              u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
 -              lane = bps / (link_bw * 8) + 1;
 -      }
 -
 -      intel_crtc->fdi_lanes = lane;
 -
 -      if (pixel_multiplier > 1)
 -              link_bw *= pixel_multiplier;
 -      ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
 -                           &m_n);
 -
 -      fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
 -      if (has_reduced_clock)
 -              fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
 -                      reduced_clock.m2;
 -
 -      /* Enable autotuning of the PLL clock (if permissible) */
 -      factor = 21;
 -      if (is_lvds) {
 -              if ((intel_panel_use_ssc(dev_priv) &&
 -                   dev_priv->lvds_ssc_freq == 100) ||
 -                  (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
 -                      factor = 25;
 -      } else if (is_sdvo && is_tv)
 -              factor = 20;
 -
 -      if (clock.m < factor * clock.n)
 -              fp |= FP_CB_TUNE;
 -
 -      dpll = 0;
 -
 -      if (is_lvds)
 -              dpll |= DPLLB_MODE_LVDS;
 -      else
 -              dpll |= DPLLB_MODE_DAC_SERIAL;
 -      if (is_sdvo) {
 -              int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 -              if (pixel_multiplier > 1) {
 -                      dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 -              }
 -              dpll |= DPLL_DVO_HIGH_SPEED;
 -      }
 -      if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
 -              dpll |= DPLL_DVO_HIGH_SPEED;
 -
 -      /* compute bitmask from p1 value */
 -      dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 -      /* also FPA1 */
 -      dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 -
 -      switch (clock.p2) {
 -      case 5:
 -              dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
 -              break;
 -      case 7:
 -              dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
 -              break;
 -      case 10:
 -              dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
 -              break;
 -      case 14:
 -              dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
 -              break;
 -      }
 -
 -      if (is_sdvo && is_tv)
 -              dpll |= PLL_REF_INPUT_TVCLKINBC;
 -      else if (is_tv)
 -              /* XXX: just matching BIOS for now */
 -              /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
 -              dpll |= 3;
 -      else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 -              dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 -      else
 -              dpll |= PLL_REF_INPUT_DREFCLK;
 -
 -      /* setup pipeconf */
 -      pipeconf = I915_READ(PIPECONF(pipe));
 -
 -      /* Set up the display plane register */
 -      dspcntr = DISPPLANE_GAMMA_ENABLE;
 -
 -      DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
 -      drm_mode_debug_printmodeline(mode);
 -
 -      /* PCH eDP needs FDI, but CPU eDP does not */
 -      if (!intel_crtc->no_pll) {
 -              if (!has_edp_encoder ||
 -                  intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 -                      I915_WRITE(PCH_FP0(pipe), fp);
 -                      I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
 -
 -                      POSTING_READ(PCH_DPLL(pipe));
 -                      udelay(150);
 -              }
 -      } else {
 -              if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
 -                  fp == I915_READ(PCH_FP0(0))) {
 -                      intel_crtc->use_pll_a = true;
 -                      DRM_DEBUG_KMS("using pipe a dpll\n");
 -              } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
 -                         fp == I915_READ(PCH_FP0(1))) {
 -                      intel_crtc->use_pll_a = false;
 -                      DRM_DEBUG_KMS("using pipe b dpll\n");
 -              } else {
 -                      DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
 -                      return -EINVAL;
 -              }
 -      }
 -
 -      /* The LVDS pin pair needs to be on before the DPLLs are enabled.
 -       * This is an exception to the general rule that mode_set doesn't turn
 -       * things on.
 -       */
 -      if (is_lvds) {
 -              temp = I915_READ(PCH_LVDS);
 -              temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 -              if (HAS_PCH_CPT(dev)) {
 -                      temp &= ~PORT_TRANS_SEL_MASK;
 -                      temp |= PORT_TRANS_SEL_CPT(pipe);
 -              } else {
 -                      if (pipe == 1)
 -                              temp |= LVDS_PIPEB_SELECT;
 -                      else
 -                              temp &= ~LVDS_PIPEB_SELECT;
 -              }
 -
 -              /* set the corresponsding LVDS_BORDER bit */
 -              temp |= dev_priv->lvds_border_bits;
 -              /* Set the B0-B3 data pairs corresponding to whether we're going to
 -               * set the DPLLs for dual-channel mode or not.
 -               */
 -              if (clock.p2 == 7)
 -                      temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
 -              else
 -                      temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
 -
 -              /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
 -               * appropriately here, but we need to look more thoroughly into how
 -               * panels behave in the two modes.
 -               */
 -              if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
 -                      lvds_sync |= LVDS_HSYNC_POLARITY;
 -              if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
 -                      lvds_sync |= LVDS_VSYNC_POLARITY;
 -              if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
 -                  != lvds_sync) {
 -                      char flags[2] = "-+";
 -                      DRM_INFO("Changing LVDS panel from "
 -                               "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
 -                               flags[!(temp & LVDS_HSYNC_POLARITY)],
 -                               flags[!(temp & LVDS_VSYNC_POLARITY)],
 -                               flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
 -                               flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
 -                      temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
 -                      temp |= lvds_sync;
 -              }
 -              I915_WRITE(PCH_LVDS, temp);
 -      }
 -
 -      pipeconf &= ~PIPECONF_DITHER_EN;
 -      pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
 -      if ((is_lvds && dev_priv->lvds_dither) || dither) {
 -              pipeconf |= PIPECONF_DITHER_EN;
 -              pipeconf |= PIPECONF_DITHER_TYPE_SP;
 -      }
 -      if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 -              intel_dp_set_m_n(crtc, mode, adjusted_mode);
 -      } else {
 -              /* For non-DP output, clear any trans DP clock recovery setting.*/
 -              I915_WRITE(TRANSDATA_M1(pipe), 0);
 -              I915_WRITE(TRANSDATA_N1(pipe), 0);
 -              I915_WRITE(TRANSDPLINK_M1(pipe), 0);
 -              I915_WRITE(TRANSDPLINK_N1(pipe), 0);
 -      }
 -
 -      if (!intel_crtc->no_pll &&
 -          (!has_edp_encoder ||
 -           intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
 -              I915_WRITE(PCH_DPLL(pipe), dpll);
 -
 -              /* Wait for the clocks to stabilize. */
 -              POSTING_READ(PCH_DPLL(pipe));
 -              udelay(150);
 -
 -              /* The pixel multiplier can only be updated once the
 -               * DPLL is enabled and the clocks are stable.
 -               *
 -               * So write it again.
 -               */
 -              I915_WRITE(PCH_DPLL(pipe), dpll);
 -      }
 -
 -      intel_crtc->lowfreq_avail = false;
 -      if (!intel_crtc->no_pll) {
 -              if (is_lvds && has_reduced_clock && i915_powersave) {
 -                      I915_WRITE(PCH_FP1(pipe), fp2);
 -                      intel_crtc->lowfreq_avail = true;
 -                      if (HAS_PIPE_CXSR(dev)) {
 -                              DRM_DEBUG_KMS("enabling CxSR downclocking\n");
 -                              pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
 -                      }
 -              } else {
 -                      I915_WRITE(PCH_FP1(pipe), fp);
 -                      if (HAS_PIPE_CXSR(dev)) {
 -                              DRM_DEBUG_KMS("disabling CxSR downclocking\n");
 -                              pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
 -                      }
 -              }
 -      }
 -
 -      pipeconf &= ~PIPECONF_INTERLACE_MASK;
 -      if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 -              pipeconf |= PIPECONF_INTERLACED_ILK;
 -              /* the chip adds 2 halflines automatically */
 -              adjusted_mode->crtc_vtotal -= 1;
 -              adjusted_mode->crtc_vblank_end -= 1;
 -              I915_WRITE(VSYNCSHIFT(pipe),
 -                         adjusted_mode->crtc_hsync_start
 -                         - adjusted_mode->crtc_htotal/2);
 -      } else {
 -              pipeconf |= PIPECONF_PROGRESSIVE;
 -              I915_WRITE(VSYNCSHIFT(pipe), 0);
 -      }
 -
 -      I915_WRITE(HTOTAL(pipe),
 -                 (adjusted_mode->crtc_hdisplay - 1) |
 -                 ((adjusted_mode->crtc_htotal - 1) << 16));
 -      I915_WRITE(HBLANK(pipe),
 -                 (adjusted_mode->crtc_hblank_start - 1) |
 -                 ((adjusted_mode->crtc_hblank_end - 1) << 16));
 -      I915_WRITE(HSYNC(pipe),
 -                 (adjusted_mode->crtc_hsync_start - 1) |
 -                 ((adjusted_mode->crtc_hsync_end - 1) << 16));
 -
 -      I915_WRITE(VTOTAL(pipe),
 -                 (adjusted_mode->crtc_vdisplay - 1) |
 -                 ((adjusted_mode->crtc_vtotal - 1) << 16));
 -      I915_WRITE(VBLANK(pipe),
 -                 (adjusted_mode->crtc_vblank_start - 1) |
 -                 ((adjusted_mode->crtc_vblank_end - 1) << 16));
 -      I915_WRITE(VSYNC(pipe),
 -                 (adjusted_mode->crtc_vsync_start - 1) |
 -                 ((adjusted_mode->crtc_vsync_end - 1) << 16));
 -
 -      /* pipesrc controls the size that is scaled from, which should
 -       * always be the user's requested size.
 -       */
 -      I915_WRITE(PIPESRC(pipe),
 -                 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 -
 -      I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
 -      I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
 -      I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
 -      I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 -
 -      if (has_edp_encoder &&
 -          !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 -              ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 -      }
 -
 -      I915_WRITE(PIPECONF(pipe), pipeconf);
 -      POSTING_READ(PIPECONF(pipe));
 -
 -      intel_wait_for_vblank(dev, pipe);
 -
 -      I915_WRITE(DSPCNTR(plane), dspcntr);
 -      POSTING_READ(DSPCNTR(plane));
 -
 -      ret = intel_pipe_set_base(crtc, x, y, old_fb);
 -
 -      intel_update_watermarks(dev);
 -
 -      return ret;
 -}
 -
 -static int intel_crtc_mode_set(struct drm_crtc *crtc,
 -                             struct drm_display_mode *mode,
 -                             struct drm_display_mode *adjusted_mode,
 -                             int x, int y,
 -                             struct drm_framebuffer *old_fb)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int pipe = intel_crtc->pipe;
 -      int ret;
 -
 -      drm_vblank_pre_modeset(dev, pipe);
 -
 -      ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
 -                                            x, y, old_fb);
 -      drm_vblank_post_modeset(dev, pipe);
 -
 -      if (ret)
 -              intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
 -      else
 -              intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
 -
 -      return ret;
 -}
 -
 -static bool intel_eld_uptodate(struct drm_connector *connector,
 -                             int reg_eldv, uint32_t bits_eldv,
 -                             int reg_elda, uint32_t bits_elda,
 -                             int reg_edid)
 -{
 -      struct drm_i915_private *dev_priv = connector->dev->dev_private;
 -      uint8_t *eld = connector->eld;
 -      uint32_t i;
 -
 -      i = I915_READ(reg_eldv);
 -      i &= bits_eldv;
 -
 -      if (!eld[0])
 -              return !i;
 -
 -      if (!i)
 -              return false;
 -
 -      i = I915_READ(reg_elda);
 -      i &= ~bits_elda;
 -      I915_WRITE(reg_elda, i);
 -
 -      for (i = 0; i < eld[2]; i++)
 -              if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
 -                      return false;
 -
 -      return true;
 -}
 -
 -static void g4x_write_eld(struct drm_connector *connector,
 -                        struct drm_crtc *crtc)
 -{
 -      struct drm_i915_private *dev_priv = connector->dev->dev_private;
 -      uint8_t *eld = connector->eld;
 -      uint32_t eldv;
 -      uint32_t len;
 -      uint32_t i;
 -
 -      i = I915_READ(G4X_AUD_VID_DID);
 -
 -      if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
 -              eldv = G4X_ELDV_DEVCL_DEVBLC;
 -      else
 -              eldv = G4X_ELDV_DEVCTG;
 -
 -      if (intel_eld_uptodate(connector,
 -                             G4X_AUD_CNTL_ST, eldv,
 -                             G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
 -                             G4X_HDMIW_HDMIEDID))
 -              return;
 -
 -      i = I915_READ(G4X_AUD_CNTL_ST);
 -      i &= ~(eldv | G4X_ELD_ADDR);
 -      len = (i >> 9) & 0x1f;          /* ELD buffer size */
 -      I915_WRITE(G4X_AUD_CNTL_ST, i);
 -
 -      if (!eld[0])
 -              return;
 -
 -      len = min_t(uint8_t, eld[2], len);
 -      DRM_DEBUG_DRIVER("ELD size %d\n", len);
 -      for (i = 0; i < len; i++)
 -              I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
 -
 -      i = I915_READ(G4X_AUD_CNTL_ST);
 -      i |= eldv;
 -      I915_WRITE(G4X_AUD_CNTL_ST, i);
 -}
 -
 -static void ironlake_write_eld(struct drm_connector *connector,
 -                                   struct drm_crtc *crtc)
 -{
 -      struct drm_i915_private *dev_priv = connector->dev->dev_private;
 -      uint8_t *eld = connector->eld;
 -      uint32_t eldv;
 -      uint32_t i;
 -      int len;
 -      int hdmiw_hdmiedid;
 -      int aud_config;
 -      int aud_cntl_st;
 -      int aud_cntrl_st2;
 -
 -      if (HAS_PCH_IBX(connector->dev)) {
 -              hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
 -              aud_config = IBX_AUD_CONFIG_A;
 -              aud_cntl_st = IBX_AUD_CNTL_ST_A;
 -              aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
 -      } else {
 -              hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
 -              aud_config = CPT_AUD_CONFIG_A;
 -              aud_cntl_st = CPT_AUD_CNTL_ST_A;
 -              aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
 -      }
 -
 -      i = to_intel_crtc(crtc)->pipe;
 -      hdmiw_hdmiedid += i * 0x100;
 -      aud_cntl_st += i * 0x100;
 -      aud_config += i * 0x100;
 -
 -      DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
 -
 -      i = I915_READ(aud_cntl_st);
 -      i = (i >> 29) & 0x3;            /* DIP_Port_Select, 0x1 = PortB */
 -      if (!i) {
 -              DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
 -              /* operate blindly on all ports */
 -              eldv = IBX_ELD_VALIDB;
 -              eldv |= IBX_ELD_VALIDB << 4;
 -              eldv |= IBX_ELD_VALIDB << 8;
 -      } else {
 -              DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
 -              eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
 -      }
 -
 -      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
 -              DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
 -              eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
 -              I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
 -      } else
 -              I915_WRITE(aud_config, 0);
 -
 -      if (intel_eld_uptodate(connector,
 -                             aud_cntrl_st2, eldv,
 -                             aud_cntl_st, IBX_ELD_ADDRESS,
 -                             hdmiw_hdmiedid))
 -              return;
 -
 -      i = I915_READ(aud_cntrl_st2);
 -      i &= ~eldv;
 -      I915_WRITE(aud_cntrl_st2, i);
 -
 -      if (!eld[0])
 -              return;
 -
 -      i = I915_READ(aud_cntl_st);
 -      i &= ~IBX_ELD_ADDRESS;
 -      I915_WRITE(aud_cntl_st, i);
 -
 -      len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
 -      DRM_DEBUG_DRIVER("ELD size %d\n", len);
 -      for (i = 0; i < len; i++)
 -              I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
 -
 -      i = I915_READ(aud_cntrl_st2);
 -      i |= eldv;
 -      I915_WRITE(aud_cntrl_st2, i);
 -}
 -
 -void intel_write_eld(struct drm_encoder *encoder,
 -                   struct drm_display_mode *mode)
 -{
 -      struct drm_crtc *crtc = encoder->crtc;
 -      struct drm_connector *connector;
 -      struct drm_device *dev = encoder->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      connector = drm_select_eld(encoder, mode);
 -      if (!connector)
 -              return;
 -
 -      DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 -                       connector->base.id,
 -                       drm_get_connector_name(connector),
 -                       connector->encoder->base.id,
 -                       drm_get_encoder_name(connector->encoder));
 -
 -      connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
 -
 -      if (dev_priv->display.write_eld)
 -              dev_priv->display.write_eld(connector, crtc);
 -}
 -
 -/** Loads the palette/gamma unit for the CRTC with the prepared values */
 -void intel_crtc_load_lut(struct drm_crtc *crtc)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int palreg = PALETTE(intel_crtc->pipe);
 -      int i;
 -
 -      /* The clocks have to be on to load the palette. */
 -      if (!crtc->enabled || !intel_crtc->active)
 -              return;
 -
 -      /* use legacy palette for Ironlake */
 -      if (HAS_PCH_SPLIT(dev))
 -              palreg = LGC_PALETTE(intel_crtc->pipe);
 -
 -      for (i = 0; i < 256; i++) {
 -              I915_WRITE(palreg + 4 * i,
 -                         (intel_crtc->lut_r[i] << 16) |
 -                         (intel_crtc->lut_g[i] << 8) |
 -                         intel_crtc->lut_b[i]);
 -      }
 -}
 -
 -static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      bool visible = base != 0;
 -      u32 cntl;
 -
 -      if (intel_crtc->cursor_visible == visible)
 -              return;
 -
 -      cntl = I915_READ(_CURACNTR);
 -      if (visible) {
 -              /* On these chipsets we can only modify the base whilst
 -               * the cursor is disabled.
 -               */
 -              I915_WRITE(_CURABASE, base);
 -
 -              cntl &= ~(CURSOR_FORMAT_MASK);
 -              /* XXX width must be 64, stride 256 => 0x00 << 28 */
 -              cntl |= CURSOR_ENABLE |
 -                      CURSOR_GAMMA_ENABLE |
 -                      CURSOR_FORMAT_ARGB;
 -      } else
 -              cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
 -      I915_WRITE(_CURACNTR, cntl);
 -
 -      intel_crtc->cursor_visible = visible;
 -}
 -
 -static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int pipe = intel_crtc->pipe;
 -      bool visible = base != 0;
 -
 -      if (intel_crtc->cursor_visible != visible) {
 -              uint32_t cntl = I915_READ(CURCNTR(pipe));
 -              if (base) {
 -                      cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
 -                      cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
 -                      cntl |= pipe << 28; /* Connect to correct pipe */
 -              } else {
 -                      cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 -                      cntl |= CURSOR_MODE_DISABLE;
 -              }
 -              I915_WRITE(CURCNTR(pipe), cntl);
 -
 -              intel_crtc->cursor_visible = visible;
 -      }
 -      /* and commit changes on next vblank */
 -      I915_WRITE(CURBASE(pipe), base);
 -}
 -
 -static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int pipe = intel_crtc->pipe;
 -      bool visible = base != 0;
 -
 -      if (intel_crtc->cursor_visible != visible) {
 -              uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
 -              if (base) {
 -                      cntl &= ~CURSOR_MODE;
 -                      cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
 -              } else {
 -                      cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 -                      cntl |= CURSOR_MODE_DISABLE;
 -              }
 -              I915_WRITE(CURCNTR_IVB(pipe), cntl);
 -
 -              intel_crtc->cursor_visible = visible;
 -      }
 -      /* and commit changes on next vblank */
 -      I915_WRITE(CURBASE_IVB(pipe), base);
 -}
 -
 -/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
 -static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 -                                   bool on)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int pipe = intel_crtc->pipe;
 -      int x = intel_crtc->cursor_x;
 -      int y = intel_crtc->cursor_y;
 -      u32 base, pos;
 -      bool visible;
 -
 -      pos = 0;
 -
 -      if (on && crtc->enabled && crtc->fb) {
 -              base = intel_crtc->cursor_addr;
 -              if (x > (int) crtc->fb->width)
 -                      base = 0;
 -
 -              if (y > (int) crtc->fb->height)
 -                      base = 0;
 -      } else
 -              base = 0;
 -
 -      if (x < 0) {
 -              if (x + intel_crtc->cursor_width < 0)
 -                      base = 0;
 -
 -              pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
 -              x = -x;
 -      }
 -      pos |= x << CURSOR_X_SHIFT;
 -
 -      if (y < 0) {
 -              if (y + intel_crtc->cursor_height < 0)
 -                      base = 0;
 -
 -              pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
 -              y = -y;
 -      }
 -      pos |= y << CURSOR_Y_SHIFT;
 -
 -      visible = base != 0;
 -      if (!visible && !intel_crtc->cursor_visible)
 -              return;
 -
 -      if (IS_IVYBRIDGE(dev)) {
 -              I915_WRITE(CURPOS_IVB(pipe), pos);
 -              ivb_update_cursor(crtc, base);
 -      } else {
 -              I915_WRITE(CURPOS(pipe), pos);
 -              if (IS_845G(dev) || IS_I865G(dev))
 -                      i845_update_cursor(crtc, base);
 -              else
 -                      i9xx_update_cursor(crtc, base);
 -      }
 -
 -      if (visible)
 -              intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
 -}
 -
 -static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 -                               struct drm_file *file,
 -                               uint32_t handle,
 -                               uint32_t width, uint32_t height)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      struct drm_i915_gem_object *obj;
 -      uint32_t addr;
 -      int ret;
 -
 -      DRM_DEBUG_KMS("\n");
 -
 -      /* if we want to turn off the cursor ignore width and height */
 -      if (!handle) {
 -              DRM_DEBUG_KMS("cursor off\n");
 -              addr = 0;
 -              obj = NULL;
 -              mutex_lock(&dev->struct_mutex);
 -              goto finish;
 -      }
 -
 -      /* Currently we only support 64x64 cursors */
 -      if (width != 64 || height != 64) {
 -              DRM_ERROR("we currently only support 64x64 cursors\n");
 -              return -EINVAL;
 -      }
 -
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
 -      if (&obj->base == NULL)
 -              return -ENOENT;
 -
 -      if (obj->base.size < width * height * 4) {
 -              DRM_ERROR("buffer is to small\n");
 -              ret = -ENOMEM;
 -              goto fail;
 -      }
 -
 -      /* we only need to pin inside GTT if cursor is non-phy */
 -      mutex_lock(&dev->struct_mutex);
 -      if (!dev_priv->info->cursor_needs_physical) {
 -              if (obj->tiling_mode) {
 -                      DRM_ERROR("cursor cannot be tiled\n");
 -                      ret = -EINVAL;
 -                      goto fail_locked;
 -              }
 -
 -              ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
 -              if (ret) {
 -                      DRM_ERROR("failed to move cursor bo into the GTT\n");
 -                      goto fail_locked;
 -              }
 -
 -              ret = i915_gem_object_put_fence(obj);
 -              if (ret) {
 -                      DRM_ERROR("failed to release fence for cursor");
 -                      goto fail_unpin;
 -              }
 -
 -              addr = obj->gtt_offset;
 -      } else {
 -              int align = IS_I830(dev) ? 16 * 1024 : 256;
 -              ret = i915_gem_attach_phys_object(dev, obj,
 -                                                (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
 -                                                align);
 -              if (ret) {
 -                      DRM_ERROR("failed to attach phys object\n");
 -                      goto fail_locked;
 -              }
 -              addr = obj->phys_obj->handle->busaddr;
 -      }
 -
 -      if (IS_GEN2(dev))
 -              I915_WRITE(CURSIZE, (height << 12) | width);
 -
 - finish:
 -      if (intel_crtc->cursor_bo) {
 -              if (dev_priv->info->cursor_needs_physical) {
 -                      if (intel_crtc->cursor_bo != obj)
 -                              i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 -              } else
 -                      i915_gem_object_unpin(intel_crtc->cursor_bo);
 -              drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 -      }
 -
 -      mutex_unlock(&dev->struct_mutex);
 -
 -      intel_crtc->cursor_addr = addr;
 -      intel_crtc->cursor_bo = obj;
 -      intel_crtc->cursor_width = width;
 -      intel_crtc->cursor_height = height;
 -
 -      intel_crtc_update_cursor(crtc, true);
 -
 -      return 0;
 -fail_unpin:
 -      i915_gem_object_unpin(obj);
 -fail_locked:
 -      mutex_unlock(&dev->struct_mutex);
 -fail:
 -      drm_gem_object_unreference_unlocked(&obj->base);
 -      return ret;
 -}
 -
 -static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 -{
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -
 -      intel_crtc->cursor_x = x;
 -      intel_crtc->cursor_y = y;
 -
 -      intel_crtc_update_cursor(crtc, true);
 -
 -      return 0;
 -}
 -
 -/** Sets the color ramps on behalf of RandR */
 -void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 -                               u16 blue, int regno)
 -{
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -
 -      intel_crtc->lut_r[regno] = red >> 8;
 -      intel_crtc->lut_g[regno] = green >> 8;
 -      intel_crtc->lut_b[regno] = blue >> 8;
 -}
 -
 -void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 -                           u16 *blue, int regno)
 -{
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -
 -      *red = intel_crtc->lut_r[regno] << 8;
 -      *green = intel_crtc->lut_g[regno] << 8;
 -      *blue = intel_crtc->lut_b[regno] << 8;
 -}
 -
 -static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 -                               u16 *blue, uint32_t start, uint32_t size)
 -{
 -      int end = (start + size > 256) ? 256 : start + size, i;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -
 -      for (i = start; i < end; i++) {
 -              intel_crtc->lut_r[i] = red[i] >> 8;
 -              intel_crtc->lut_g[i] = green[i] >> 8;
 -              intel_crtc->lut_b[i] = blue[i] >> 8;
 -      }
 -
 -      intel_crtc_load_lut(crtc);
 -}
 -
 -/**
 - * Get a pipe with a simple mode set on it for doing load-based monitor
 - * detection.
 - *
 - * It will be up to the load-detect code to adjust the pipe as appropriate for
 - * its requirements.  The pipe will be connected to no other encoders.
 - *
 - * Currently this code will only succeed if there is a pipe with no encoders
 - * configured for it.  In the future, it could choose to temporarily disable
 - * some outputs to free up a pipe for its use.
 - *
 - * \return crtc, or NULL if no pipes are available.
 - */
 -
 -/* VESA 640x480x72Hz mode to set on the pipe */
 -static struct drm_display_mode load_detect_mode = {
 -      DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
 -               704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
 -};
 -
 -static struct drm_framebuffer *
 -intel_framebuffer_create(struct drm_device *dev,
 -                       struct drm_mode_fb_cmd2 *mode_cmd,
 -                       struct drm_i915_gem_object *obj)
 -{
 -      struct intel_framebuffer *intel_fb;
 -      int ret;
 -
 -      intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
 -      if (!intel_fb) {
 -              drm_gem_object_unreference_unlocked(&obj->base);
 -              return ERR_PTR(-ENOMEM);
 -      }
 -
 -      ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
 -      if (ret) {
 -              drm_gem_object_unreference_unlocked(&obj->base);
 -              kfree(intel_fb);
 -              return ERR_PTR(ret);
 -      }
 -
 -      return &intel_fb->base;
 -}
 -
 -static u32
 -intel_framebuffer_pitch_for_width(int width, int bpp)
 -{
 -      u32 pitch = DIV_ROUND_UP(width * bpp, 8);
 -      return ALIGN(pitch, 64);
 -}
 -
 -static u32
 -intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
 -{
 -      u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
 -      return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
 -}
 -
 -static struct drm_framebuffer *
 -intel_framebuffer_create_for_mode(struct drm_device *dev,
 -                                struct drm_display_mode *mode,
 -                                int depth, int bpp)
 -{
 -      struct drm_i915_gem_object *obj;
 -      struct drm_mode_fb_cmd2 mode_cmd;
 -
 -      obj = i915_gem_alloc_object(dev,
 -                                  intel_framebuffer_size_for_mode(mode, bpp));
 -      if (obj == NULL)
 -              return ERR_PTR(-ENOMEM);
 -
 -      mode_cmd.width = mode->hdisplay;
 -      mode_cmd.height = mode->vdisplay;
 -      mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
 -                                                              bpp);
 -      mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
 -
 -      return intel_framebuffer_create(dev, &mode_cmd, obj);
 -}
 -
 -static struct drm_framebuffer *
 -mode_fits_in_fbdev(struct drm_device *dev,
 -                 struct drm_display_mode *mode)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_i915_gem_object *obj;
 -      struct drm_framebuffer *fb;
 -
 -      if (dev_priv->fbdev == NULL)
 -              return NULL;
 -
 -      obj = dev_priv->fbdev->ifb.obj;
 -      if (obj == NULL)
 -              return NULL;
 -
 -      fb = &dev_priv->fbdev->ifb.base;
 -      if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
 -                                                             fb->bits_per_pixel))
 -              return NULL;
 -
 -      if (obj->base.size < mode->vdisplay * fb->pitches[0])
 -              return NULL;
 -
 -      return fb;
 -}
 -
 -bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 -                              struct drm_connector *connector,
 -                              struct drm_display_mode *mode,
 -                              struct intel_load_detect_pipe *old)
 -{
 -      struct intel_crtc *intel_crtc;
 -      struct drm_crtc *possible_crtc;
 -      struct drm_encoder *encoder = &intel_encoder->base;
 -      struct drm_crtc *crtc = NULL;
 -      struct drm_device *dev = encoder->dev;
 -      struct drm_framebuffer *old_fb;
 -      int i = -1;
 -
 -      DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 -                    connector->base.id, drm_get_connector_name(connector),
 -                    encoder->base.id, drm_get_encoder_name(encoder));
 -
 -      /*
 -       * Algorithm gets a little messy:
 -       *
 -       *   - if the connector already has an assigned crtc, use it (but make
 -       *     sure it's on first)
 -       *
 -       *   - try to find the first unused crtc that can drive this connector,
 -       *     and use that if we find one
 -       */
 -
 -      /* See if we already have a CRTC for this connector */
 -      if (encoder->crtc) {
 -              crtc = encoder->crtc;
 -
 -              intel_crtc = to_intel_crtc(crtc);
 -              old->dpms_mode = intel_crtc->dpms_mode;
 -              old->load_detect_temp = false;
 -
 -              /* Make sure the crtc and connector are running */
 -              if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
 -                      struct drm_encoder_helper_funcs *encoder_funcs;
 -                      struct drm_crtc_helper_funcs *crtc_funcs;
 -
 -                      crtc_funcs = crtc->helper_private;
 -                      crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
 -
 -                      encoder_funcs = encoder->helper_private;
 -                      encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
 -              }
 -
 -              return true;
 -      }
 -
 -      /* Find an unused one (if possible) */
 -      list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
 -              i++;
 -              if (!(encoder->possible_crtcs & (1 << i)))
 -                      continue;
 -              if (!possible_crtc->enabled) {
 -                      crtc = possible_crtc;
 -                      break;
 -              }
 -      }
 +static void ironlake_write_eld(struct drm_connector *connector,
 +                                   struct drm_crtc *crtc)
 +{
 +      struct drm_i915_private *dev_priv = connector->dev->dev_private;
 +      uint8_t *eld = connector->eld;
 +      uint32_t eldv;
 +      uint32_t i;
 +      int len;
 +      int hdmiw_hdmiedid;
 +      int aud_config;
 +      int aud_cntl_st;
 +      int aud_cntrl_st2;
  
 -      /*
 -       * If we didn't find an unused CRTC, don't use any.
 -       */
 -      if (!crtc) {
 -              DRM_DEBUG_KMS("no pipe available for load-detect\n");
 -              return false;
 +      if (HAS_PCH_IBX(connector->dev)) {
 +              hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
 +              aud_config = IBX_AUD_CONFIG_A;
 +              aud_cntl_st = IBX_AUD_CNTL_ST_A;
 +              aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
 +      } else {
 +              hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
 +              aud_config = CPT_AUD_CONFIG_A;
 +              aud_cntl_st = CPT_AUD_CNTL_ST_A;
 +              aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
        }
  
 -      encoder->crtc = crtc;
 -      connector->encoder = encoder;
 -
 -      intel_crtc = to_intel_crtc(crtc);
 -      old->dpms_mode = intel_crtc->dpms_mode;
 -      old->load_detect_temp = true;
 -      old->release_fb = NULL;
 -
 -      if (!mode)
 -              mode = &load_detect_mode;
 -
 -      old_fb = crtc->fb;
 +      i = to_intel_crtc(crtc)->pipe;
 +      hdmiw_hdmiedid += i * 0x100;
 +      aud_cntl_st += i * 0x100;
 +      aud_config += i * 0x100;
  
 -      /* We need a framebuffer large enough to accommodate all accesses
 -       * that the plane may generate whilst we perform load detection.
 -       * We can not rely on the fbcon either being present (we get called
 -       * during its initialisation to detect all boot displays, or it may
 -       * not even exist) or that it is large enough to satisfy the
 -       * requested mode.
 -       */
 -      crtc->fb = mode_fits_in_fbdev(dev, mode);
 -      if (crtc->fb == NULL) {
 -              DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
 -              crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
 -              old->release_fb = crtc->fb;
 -      } else
 -              DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
 -      if (IS_ERR(crtc->fb)) {
 -              DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
 -              crtc->fb = old_fb;
 -              return false;
 -      }
 +      DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
  
 -      if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
 -              DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 -              if (old->release_fb)
 -                      old->release_fb->funcs->destroy(old->release_fb);
 -              crtc->fb = old_fb;
 -              return false;
 +      i = I915_READ(aud_cntl_st);
 +      i = (i >> 29) & 0x3;            /* DIP_Port_Select, 0x1 = PortB */
 +      if (!i) {
 +              DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
 +              /* operate blindly on all ports */
 +              eldv = IBX_ELD_VALIDB;
 +              eldv |= IBX_ELD_VALIDB << 4;
 +              eldv |= IBX_ELD_VALIDB << 8;
 +      } else {
 +              DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
 +              eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
        }
  
 -      /* let the connector get through one full cycle before testing */
 -      intel_wait_for_vblank(dev, intel_crtc->pipe);
 -
 -      return true;
 -}
 +      if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
 +              DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
 +              eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
 +              I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
 +      } else
 +              I915_WRITE(aud_config, 0);
  
 -void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
 -                                  struct drm_connector *connector,
 -                                  struct intel_load_detect_pipe *old)
 -{
 -      struct drm_encoder *encoder = &intel_encoder->base;
 -      struct drm_device *dev = encoder->dev;
 -      struct drm_crtc *crtc = encoder->crtc;
 -      struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 -      struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 +      if (intel_eld_uptodate(connector,
 +                             aud_cntrl_st2, eldv,
 +                             aud_cntl_st, IBX_ELD_ADDRESS,
 +                             hdmiw_hdmiedid))
 +              return;
  
 -      DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 -                    connector->base.id, drm_get_connector_name(connector),
 -                    encoder->base.id, drm_get_encoder_name(encoder));
 +      i = I915_READ(aud_cntrl_st2);
 +      i &= ~eldv;
 +      I915_WRITE(aud_cntrl_st2, i);
  
 -      if (old->load_detect_temp) {
 -              connector->encoder = NULL;
 -              drm_helper_disable_unused_functions(dev);
 +      if (!eld[0])
 +              return;
  
 -              if (old->release_fb)
 -                      old->release_fb->funcs->destroy(old->release_fb);
 +      i = I915_READ(aud_cntl_st);
 +      i &= ~IBX_ELD_ADDRESS;
 +      I915_WRITE(aud_cntl_st, i);
  
 -              return;
 -      }
 +      len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
 +      DRM_DEBUG_DRIVER("ELD size %d\n", len);
 +      for (i = 0; i < len; i++)
 +              I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
  
 -      /* Switch crtc and encoder back off if necessary */
 -      if (old->dpms_mode != DRM_MODE_DPMS_ON) {
 -              encoder_funcs->dpms(encoder, old->dpms_mode);
 -              crtc_funcs->dpms(crtc, old->dpms_mode);
 -      }
 +      i = I915_READ(aud_cntrl_st2);
 +      i |= eldv;
 +      I915_WRITE(aud_cntrl_st2, i);
  }
  
 -/* Returns the clock of the currently programmed mode of the given pipe. */
 -static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 +void intel_write_eld(struct drm_encoder *encoder,
 +                   struct drm_display_mode *mode)
  {
 +      struct drm_crtc *crtc = encoder->crtc;
 +      struct drm_connector *connector;
 +      struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int pipe = intel_crtc->pipe;
 -      u32 dpll = I915_READ(DPLL(pipe));
 -      u32 fp;
 -      intel_clock_t clock;
 -
 -      if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
 -              fp = I915_READ(FP0(pipe));
 -      else
 -              fp = I915_READ(FP1(pipe));
 -
 -      clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
 -      if (IS_PINEVIEW(dev)) {
 -              clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
 -              clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
 -      } else {
 -              clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
 -              clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
 -      }
 -
 -      if (!IS_GEN2(dev)) {
 -              if (IS_PINEVIEW(dev))
 -                      clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
 -                              DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
 -              else
 -                      clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
 -                             DPLL_FPA01_P1_POST_DIV_SHIFT);
 -
 -              switch (dpll & DPLL_MODE_MASK) {
 -              case DPLLB_MODE_DAC_SERIAL:
 -                      clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
 -                              5 : 10;
 -                      break;
 -              case DPLLB_MODE_LVDS:
 -                      clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
 -                              7 : 14;
 -                      break;
 -              default:
 -                      DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
 -                                "mode\n", (int)(dpll & DPLL_MODE_MASK));
 -                      return 0;
 -              }
 -
 -              /* XXX: Handle the 100Mhz refclk */
 -              intel_clock(dev, 96000, &clock);
 -      } else {
 -              bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
  
 -              if (is_lvds) {
 -                      clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
 -                                     DPLL_FPA01_P1_POST_DIV_SHIFT);
 -                      clock.p2 = 14;
 -
 -                      if ((dpll & PLL_REF_INPUT_MASK) ==
 -                          PLLB_REF_INPUT_SPREADSPECTRUMIN) {
 -                              /* XXX: might not be 66MHz */
 -                              intel_clock(dev, 66000, &clock);
 -                      } else
 -                              intel_clock(dev, 48000, &clock);
 -              } else {
 -                      if (dpll & PLL_P1_DIVIDE_BY_TWO)
 -                              clock.p1 = 2;
 -                      else {
 -                              clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
 -                                          DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
 -                      }
 -                      if (dpll & PLL_P2_DIVIDE_BY_4)
 -                              clock.p2 = 4;
 -                      else
 -                              clock.p2 = 2;
 +      connector = drm_select_eld(encoder, mode);
 +      if (!connector)
 +              return;
  
 -                      intel_clock(dev, 48000, &clock);
 -              }
 -      }
 +      DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 +                       connector->base.id,
 +                       drm_get_connector_name(connector),
 +                       connector->encoder->base.id,
 +                       drm_get_encoder_name(connector->encoder));
  
 -      /* XXX: It would be nice to validate the clocks, but we can't reuse
 -       * i830PllIsValid() because it relies on the xf86_config connector
 -       * configuration being accurate, which it isn't necessarily.
 -       */
 +      connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
  
 -      return clock.dot;
 +      if (dev_priv->display.write_eld)
 +              dev_priv->display.write_eld(connector, crtc);
  }
  
 -/** Returns the currently programmed mode of the given pipe. */
 -struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 -                                           struct drm_crtc *crtc)
 +/** Loads the palette/gamma unit for the CRTC with the prepared values */
 +void intel_crtc_load_lut(struct drm_crtc *crtc)
  {
 +      struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int pipe = intel_crtc->pipe;
 -      struct drm_display_mode *mode;
 -      int htot = I915_READ(HTOTAL(pipe));
 -      int hsync = I915_READ(HSYNC(pipe));
 -      int vtot = I915_READ(VTOTAL(pipe));
 -      int vsync = I915_READ(VSYNC(pipe));
 -
 -      mode = kzalloc(sizeof(*mode), GFP_KERNEL);
 -      if (!mode)
 -              return NULL;
 +      int palreg = PALETTE(intel_crtc->pipe);
 +      int i;
  
 -      mode->clock = intel_crtc_clock_get(dev, crtc);
 -      mode->hdisplay = (htot & 0xffff) + 1;
 -      mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
 -      mode->hsync_start = (hsync & 0xffff) + 1;
 -      mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
 -      mode->vdisplay = (vtot & 0xffff) + 1;
 -      mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
 -      mode->vsync_start = (vsync & 0xffff) + 1;
 -      mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
 +      /* The clocks have to be on to load the palette. */
 +      if (!crtc->enabled || !intel_crtc->active)
 +              return;
  
 -      drm_mode_set_name(mode);
 -      drm_mode_set_crtcinfo(mode, 0);
 +      /* use legacy palette for Ironlake */
 +      if (HAS_PCH_SPLIT(dev))
 +              palreg = LGC_PALETTE(intel_crtc->pipe);
  
 -      return mode;
 +      for (i = 0; i < 256; i++) {
 +              I915_WRITE(palreg + 4 * i,
 +                         (intel_crtc->lut_r[i] << 16) |
 +                         (intel_crtc->lut_g[i] << 8) |
 +                         intel_crtc->lut_b[i]);
 +      }
  }
  
 -#define GPU_IDLE_TIMEOUT 500 /* ms */
 -
 -/* When this timer fires, we've been idle for awhile */
 -static void intel_gpu_idle_timer(unsigned long arg)
 +static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
  {
 -      struct drm_device *dev = (struct drm_device *)arg;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      bool visible = base != 0;
 +      u32 cntl;
  
 -      if (!list_empty(&dev_priv->mm.active_list)) {
 -              /* Still processing requests, so just re-arm the timer. */
 -              mod_timer(&dev_priv->idle_timer, jiffies +
 -                        msecs_to_jiffies(GPU_IDLE_TIMEOUT));
 +      if (intel_crtc->cursor_visible == visible)
                return;
 -      }
  
 -      dev_priv->busy = false;
 -      queue_work(dev_priv->wq, &dev_priv->idle_work);
 -}
 +      cntl = I915_READ(_CURACNTR);
 +      if (visible) {
 +              /* On these chipsets we can only modify the base whilst
 +               * the cursor is disabled.
 +               */
 +              I915_WRITE(_CURABASE, base);
  
 -#define CRTC_IDLE_TIMEOUT 1000 /* ms */
 +              cntl &= ~(CURSOR_FORMAT_MASK);
 +              /* XXX width must be 64, stride 256 => 0x00 << 28 */
 +              cntl |= CURSOR_ENABLE |
 +                      CURSOR_GAMMA_ENABLE |
 +                      CURSOR_FORMAT_ARGB;
 +      } else
 +              cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
 +      I915_WRITE(_CURACNTR, cntl);
  
 -static void intel_crtc_idle_timer(unsigned long arg)
 +      intel_crtc->cursor_visible = visible;
 +}
 +
 +static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
  {
 -      struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
 -      struct drm_crtc *crtc = &intel_crtc->base;
 -      drm_i915_private_t *dev_priv = crtc->dev->dev_private;
 -      struct intel_framebuffer *intel_fb;
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      bool visible = base != 0;
  
 -      intel_fb = to_intel_framebuffer(crtc->fb);
 -      if (intel_fb && intel_fb->obj->active) {
 -              /* The framebuffer is still being accessed by the GPU. */
 -              mod_timer(&intel_crtc->idle_timer, jiffies +
 -                        msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
 -              return;
 -      }
 +      if (intel_crtc->cursor_visible != visible) {
 +              uint32_t cntl = I915_READ(CURCNTR(pipe));
 +              if (base) {
 +                      cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
 +                      cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
 +                      cntl |= pipe << 28; /* Connect to correct pipe */
 +              } else {
 +                      cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 +                      cntl |= CURSOR_MODE_DISABLE;
 +              }
 +              I915_WRITE(CURCNTR(pipe), cntl);
  
 -      intel_crtc->busy = false;
 -      queue_work(dev_priv->wq, &dev_priv->idle_work);
 +              intel_crtc->cursor_visible = visible;
 +      }
 +      /* and commit changes on next vblank */
 +      I915_WRITE(CURBASE(pipe), base);
  }
  
 -static void intel_increase_pllclock(struct drm_crtc *crtc)
 +static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
  {
        struct drm_device *dev = crtc->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
 -      int dpll_reg = DPLL(pipe);
 -      int dpll;
 +      bool visible = base != 0;
  
 -      if (HAS_PCH_SPLIT(dev))
 -              return;
 +      if (intel_crtc->cursor_visible != visible) {
 +              uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
 +              if (base) {
 +                      cntl &= ~CURSOR_MODE;
 +                      cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
 +              } else {
 +                      cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
 +                      cntl |= CURSOR_MODE_DISABLE;
 +              }
 +              I915_WRITE(CURCNTR_IVB(pipe), cntl);
  
 -      if (!dev_priv->lvds_downclock_avail)
 -              return;
 +              intel_crtc->cursor_visible = visible;
 +      }
 +      /* and commit changes on next vblank */
 +      I915_WRITE(CURBASE_IVB(pipe), base);
 +}
  
 -      dpll = I915_READ(dpll_reg);
 -      if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
 -              DRM_DEBUG_DRIVER("upclocking LVDS\n");
 +/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
 +static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 +                                   bool on)
 +{
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      int x = intel_crtc->cursor_x;
 +      int y = intel_crtc->cursor_y;
 +      u32 base, pos;
 +      bool visible;
  
 -              assert_panel_unlocked(dev_priv, pipe);
 +      pos = 0;
  
 -              dpll &= ~DISPLAY_RATE_SELECT_FPA1;
 -              I915_WRITE(dpll_reg, dpll);
 -              intel_wait_for_vblank(dev, pipe);
 +      if (on && crtc->enabled && crtc->fb) {
 +              base = intel_crtc->cursor_addr;
 +              if (x > (int) crtc->fb->width)
 +                      base = 0;
  
 -              dpll = I915_READ(dpll_reg);
 -              if (dpll & DISPLAY_RATE_SELECT_FPA1)
 -                      DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
 +              if (y > (int) crtc->fb->height)
 +                      base = 0;
 +      } else
 +              base = 0;
 +
 +      if (x < 0) {
 +              if (x + intel_crtc->cursor_width < 0)
 +                      base = 0;
 +
 +              pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
 +              x = -x;
        }
 +      pos |= x << CURSOR_X_SHIFT;
  
 -      /* Schedule downclock */
 -      mod_timer(&intel_crtc->idle_timer, jiffies +
 -                msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
 -}
 +      if (y < 0) {
 +              if (y + intel_crtc->cursor_height < 0)
 +                      base = 0;
  
 -static void intel_decrease_pllclock(struct drm_crtc *crtc)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +              pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
 +              y = -y;
 +      }
 +      pos |= y << CURSOR_Y_SHIFT;
  
 -      if (HAS_PCH_SPLIT(dev))
 +      visible = base != 0;
 +      if (!visible && !intel_crtc->cursor_visible)
                return;
  
 -      if (!dev_priv->lvds_downclock_avail)
 -              return;
 +      if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 +              I915_WRITE(CURPOS_IVB(pipe), pos);
 +              ivb_update_cursor(crtc, base);
 +      } else {
 +              I915_WRITE(CURPOS(pipe), pos);
 +              if (IS_845G(dev) || IS_I865G(dev))
 +                      i845_update_cursor(crtc, base);
 +              else
 +                      i9xx_update_cursor(crtc, base);
 +      }
  
 -      /*
 -       * Since this is called by a timer, we should never get here in
 -       * the manual case.
 -       */
 -      if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
 -              int pipe = intel_crtc->pipe;
 -              int dpll_reg = DPLL(pipe);
 -              u32 dpll;
 +      if (visible)
 +              intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
 +}
  
 -              DRM_DEBUG_DRIVER("downclocking LVDS\n");
 +static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 +                               struct drm_file *file,
 +                               uint32_t handle,
 +                               uint32_t width, uint32_t height)
 +{
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      struct drm_i915_gem_object *obj;
 +      uint32_t addr;
 +      int ret;
  
 -              assert_panel_unlocked(dev_priv, pipe);
 +      DRM_DEBUG_KMS("\n");
  
 -              dpll = I915_READ(dpll_reg);
 -              dpll |= DISPLAY_RATE_SELECT_FPA1;
 -              I915_WRITE(dpll_reg, dpll);
 -              intel_wait_for_vblank(dev, pipe);
 -              dpll = I915_READ(dpll_reg);
 -              if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
 -                      DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
 +      /* if we want to turn off the cursor ignore width and height */
 +      if (!handle) {
 +              DRM_DEBUG_KMS("cursor off\n");
 +              addr = 0;
 +              obj = NULL;
 +              mutex_lock(&dev->struct_mutex);
 +              goto finish;
        }
 -}
  
 -/**
 - * intel_idle_update - adjust clocks for idleness
 - * @work: work struct
 - *
 - * Either the GPU or display (or both) went idle.  Check the busy status
 - * here and adjust the CRTC and GPU clocks as necessary.
 - */
 -static void intel_idle_update(struct work_struct *work)
 -{
 -      drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 -                                                  idle_work);
 -      struct drm_device *dev = dev_priv->dev;
 -      struct drm_crtc *crtc;
 -      struct intel_crtc *intel_crtc;
 +      /* Currently we only support 64x64 cursors */
 +      if (width != 64 || height != 64) {
 +              DRM_ERROR("we currently only support 64x64 cursors\n");
 +              return -EINVAL;
 +      }
  
 -      if (!i915_powersave)
 -              return;
 +      obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
 +      if (&obj->base == NULL)
 +              return -ENOENT;
 +
 +      if (obj->base.size < width * height * 4) {
 +              DRM_ERROR("buffer is to small\n");
 +              ret = -ENOMEM;
 +              goto fail;
 +      }
  
 +      /* we only need to pin inside GTT if cursor is non-phy */
        mutex_lock(&dev->struct_mutex);
 +      if (!dev_priv->info->cursor_needs_physical) {
 +              if (obj->tiling_mode) {
 +                      DRM_ERROR("cursor cannot be tiled\n");
 +                      ret = -EINVAL;
 +                      goto fail_locked;
 +              }
  
 -      i915_update_gfx_val(dev_priv);
 +              ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
 +              if (ret) {
 +                      DRM_ERROR("failed to move cursor bo into the GTT\n");
 +                      goto fail_locked;
 +              }
  
 -      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 -              /* Skip inactive CRTCs */
 -              if (!crtc->fb)
 -                      continue;
 +              ret = i915_gem_object_put_fence(obj);
 +              if (ret) {
 +                      DRM_ERROR("failed to release fence for cursor");
 +                      goto fail_unpin;
 +              }
  
 -              intel_crtc = to_intel_crtc(crtc);
 -              if (!intel_crtc->busy)
 -                      intel_decrease_pllclock(crtc);
 +              addr = obj->gtt_offset;
 +      } else {
 +              int align = IS_I830(dev) ? 16 * 1024 : 256;
 +              ret = i915_gem_attach_phys_object(dev, obj,
 +                                                (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
 +                                                align);
 +              if (ret) {
 +                      DRM_ERROR("failed to attach phys object\n");
 +                      goto fail_locked;
 +              }
 +              addr = obj->phys_obj->handle->busaddr;
        }
  
 +      if (IS_GEN2(dev))
 +              I915_WRITE(CURSIZE, (height << 12) | width);
  
 -      mutex_unlock(&dev->struct_mutex);
 -}
 -
 -/**
 - * intel_mark_busy - mark the GPU and possibly the display busy
 - * @dev: drm device
 - * @obj: object we're operating on
 - *
 - * Callers can use this function to indicate that the GPU is busy processing
 - * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
 - * buffer), we'll also mark the display as busy, so we know to increase its
 - * clock frequency.
 - */
 -void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
 -{
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      struct drm_crtc *crtc = NULL;
 -      struct intel_framebuffer *intel_fb;
 -      struct intel_crtc *intel_crtc;
 + finish:
 +      if (intel_crtc->cursor_bo) {
 +              if (dev_priv->info->cursor_needs_physical) {
 +                      if (intel_crtc->cursor_bo != obj)
 +                              i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 +              } else
 +                      i915_gem_object_unpin(intel_crtc->cursor_bo);
 +              drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 +      }
  
 -      if (!drm_core_check_feature(dev, DRIVER_MODESET))
 -              return;
 +      mutex_unlock(&dev->struct_mutex);
  
 -      if (!dev_priv->busy)
 -              dev_priv->busy = true;
 -      else
 -              mod_timer(&dev_priv->idle_timer, jiffies +
 -                        msecs_to_jiffies(GPU_IDLE_TIMEOUT));
 +      intel_crtc->cursor_addr = addr;
 +      intel_crtc->cursor_bo = obj;
 +      intel_crtc->cursor_width = width;
 +      intel_crtc->cursor_height = height;
  
 -      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 -              if (!crtc->fb)
 -                      continue;
 +      intel_crtc_update_cursor(crtc, true);
  
 -              intel_crtc = to_intel_crtc(crtc);
 -              intel_fb = to_intel_framebuffer(crtc->fb);
 -              if (intel_fb->obj == obj) {
 -                      if (!intel_crtc->busy) {
 -                              /* Non-busy -> busy, upclock */
 -                              intel_increase_pllclock(crtc);
 -                              intel_crtc->busy = true;
 -                      } else {
 -                              /* Busy -> busy, put off timer */
 -                              mod_timer(&intel_crtc->idle_timer, jiffies +
 -                                        msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
 -                      }
 -              }
 -      }
 +      return 0;
 +fail_unpin:
 +      i915_gem_object_unpin(obj);
 +fail_locked:
 +      mutex_unlock(&dev->struct_mutex);
 +fail:
 +      drm_gem_object_unreference_unlocked(&obj->base);
 +      return ret;
  }
  
 -static void intel_crtc_destroy(struct drm_crtc *crtc)
 +static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
  {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      struct drm_device *dev = crtc->dev;
 -      struct intel_unpin_work *work;
 -      unsigned long flags;
 -
 -      spin_lock_irqsave(&dev->event_lock, flags);
 -      work = intel_crtc->unpin_work;
 -      intel_crtc->unpin_work = NULL;
 -      spin_unlock_irqrestore(&dev->event_lock, flags);
  
 -      if (work) {
 -              cancel_work_sync(&work->work);
 -              kfree(work);
 -      }
 +      intel_crtc->cursor_x = x;
 +      intel_crtc->cursor_y = y;
  
 -      drm_crtc_cleanup(crtc);
 +      intel_crtc_update_cursor(crtc, true);
  
 -      kfree(intel_crtc);
 +      return 0;
  }
  
 -static void intel_unpin_work_fn(struct work_struct *__work)
 +/** Sets the color ramps on behalf of RandR */
 +void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 +                               u16 blue, int regno)
  {
 -      struct intel_unpin_work *work =
 -              container_of(__work, struct intel_unpin_work, work);
 -
 -      mutex_lock(&work->dev->struct_mutex);
 -      intel_unpin_fb_obj(work->old_fb_obj);
 -      drm_gem_object_unreference(&work->pending_flip_obj->base);
 -      drm_gem_object_unreference(&work->old_fb_obj->base);
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  
 -      intel_update_fbc(work->dev);
 -      mutex_unlock(&work->dev->struct_mutex);
 -      kfree(work);
 +      intel_crtc->lut_r[regno] = red >> 8;
 +      intel_crtc->lut_g[regno] = green >> 8;
 +      intel_crtc->lut_b[regno] = blue >> 8;
  }
  
 -static void do_intel_finish_page_flip(struct drm_device *dev,
 -                                    struct drm_crtc *crtc)
 +void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 +                           u16 *blue, int regno)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      struct intel_unpin_work *work;
 -      struct drm_i915_gem_object *obj;
 -      struct drm_pending_vblank_event *e;
 -      struct timeval tnow, tvbl;
 -      unsigned long flags;
  
 -      /* Ignore early vblank irqs */
 -      if (intel_crtc == NULL)
 -              return;
 +      *red = intel_crtc->lut_r[regno] << 8;
 +      *green = intel_crtc->lut_g[regno] << 8;
 +      *blue = intel_crtc->lut_b[regno] << 8;
 +}
  
 -      do_gettimeofday(&tnow);
 +static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 +                               u16 *blue, uint32_t start, uint32_t size)
 +{
 +      int end = (start + size > 256) ? 256 : start + size, i;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  
 -      spin_lock_irqsave(&dev->event_lock, flags);
 -      work = intel_crtc->unpin_work;
 -      if (work == NULL || !work->pending) {
 -              spin_unlock_irqrestore(&dev->event_lock, flags);
 -              return;
 +      for (i = start; i < end; i++) {
 +              intel_crtc->lut_r[i] = red[i] >> 8;
 +              intel_crtc->lut_g[i] = green[i] >> 8;
 +              intel_crtc->lut_b[i] = blue[i] >> 8;
        }
  
 -      intel_crtc->unpin_work = NULL;
 -
 -      if (work->event) {
 -              e = work->event;
 -              e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
 -
 -              /* Called before vblank count and timestamps have
 -               * been updated for the vblank interval of flip
 -               * completion? Need to increment vblank count and
 -               * add one videorefresh duration to returned timestamp
 -               * to account for this. We assume this happened if we
 -               * get called over 0.9 frame durations after the last
 -               * timestamped vblank.
 -               *
 -               * This calculation can not be used with vrefresh rates
 -               * below 5Hz (10Hz to be on the safe side) without
 -               * promoting to 64 integers.
 -               */
 -              if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
 -                  9 * crtc->framedur_ns) {
 -                      e->event.sequence++;
 -                      tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
 -                                           crtc->framedur_ns);
 -              }
 -
 -              e->event.tv_sec = tvbl.tv_sec;
 -              e->event.tv_usec = tvbl.tv_usec;
 -
 -              list_add_tail(&e->base.link,
 -                            &e->base.file_priv->event_list);
 -              wake_up_interruptible(&e->base.file_priv->event_wait);
 -      }
 +      intel_crtc_load_lut(crtc);
 +}
  
 -      drm_vblank_put(dev, intel_crtc->pipe);
 +/**
 + * Get a pipe with a simple mode set on it for doing load-based monitor
 + * detection.
 + *
 + * It will be up to the load-detect code to adjust the pipe as appropriate for
 + * its requirements.  The pipe will be connected to no other encoders.
 + *
 + * Currently this code will only succeed if there is a pipe with no encoders
 + * configured for it.  In the future, it could choose to temporarily disable
 + * some outputs to free up a pipe for its use.
 + *
 + * \return crtc, or NULL if no pipes are available.
 + */
  
 -      spin_unlock_irqrestore(&dev->event_lock, flags);
 +/* VESA 640x480x72Hz mode to set on the pipe */
 +static struct drm_display_mode load_detect_mode = {
 +      DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
 +               704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
 +};
  
 -      obj = work->old_fb_obj;
 +static struct drm_framebuffer *
 +intel_framebuffer_create(struct drm_device *dev,
 +                       struct drm_mode_fb_cmd2 *mode_cmd,
 +                       struct drm_i915_gem_object *obj)
 +{
 +      struct intel_framebuffer *intel_fb;
 +      int ret;
  
 -      atomic_clear_mask(1 << intel_crtc->plane,
 -                        &obj->pending_flip.counter);
 -      if (atomic_read(&obj->pending_flip) == 0)
 -              wake_up(&dev_priv->pending_flip_queue);
 +      intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
 +      if (!intel_fb) {
 +              drm_gem_object_unreference_unlocked(&obj->base);
 +              return ERR_PTR(-ENOMEM);
 +      }
  
 -      schedule_work(&work->work);
 +      ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
 +      if (ret) {
 +              drm_gem_object_unreference_unlocked(&obj->base);
 +              kfree(intel_fb);
 +              return ERR_PTR(ret);
 +      }
  
 -      trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
 +      return &intel_fb->base;
  }
  
 -void intel_finish_page_flip(struct drm_device *dev, int pipe)
 +static u32
 +intel_framebuffer_pitch_for_width(int width, int bpp)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 -
 -      do_intel_finish_page_flip(dev, crtc);
 +      u32 pitch = DIV_ROUND_UP(width * bpp, 8);
 +      return ALIGN(pitch, 64);
  }
  
 -void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
 +static u32
 +intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
 -
 -      do_intel_finish_page_flip(dev, crtc);
 +      u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
 +      return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
  }
  
 -void intel_prepare_page_flip(struct drm_device *dev, int plane)
 +static struct drm_framebuffer *
 +intel_framebuffer_create_for_mode(struct drm_device *dev,
 +                                struct drm_display_mode *mode,
 +                                int depth, int bpp)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc =
 -              to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
 -      unsigned long flags;
 +      struct drm_i915_gem_object *obj;
 +      struct drm_mode_fb_cmd2 mode_cmd;
  
 -      spin_lock_irqsave(&dev->event_lock, flags);
 -      if (intel_crtc->unpin_work) {
 -              if ((++intel_crtc->unpin_work->pending) > 1)
 -                      DRM_ERROR("Prepared flip multiple times\n");
 -      } else {
 -              DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
 -      }
 -      spin_unlock_irqrestore(&dev->event_lock, flags);
 +      obj = i915_gem_alloc_object(dev,
 +                                  intel_framebuffer_size_for_mode(mode, bpp));
 +      if (obj == NULL)
 +              return ERR_PTR(-ENOMEM);
 +
 +      mode_cmd.width = mode->hdisplay;
 +      mode_cmd.height = mode->vdisplay;
 +      mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
 +                                                              bpp);
 +      mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
 +
 +      return intel_framebuffer_create(dev, &mode_cmd, obj);
  }
  
 -static int intel_gen2_queue_flip(struct drm_device *dev,
 -                               struct drm_crtc *crtc,
 -                               struct drm_framebuffer *fb,
 -                               struct drm_i915_gem_object *obj)
 +static struct drm_framebuffer *
 +mode_fits_in_fbdev(struct drm_device *dev,
 +                 struct drm_display_mode *mode)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      unsigned long offset;
 -      u32 flip_mask;
 -      int ret;
 +      struct drm_i915_gem_object *obj;
 +      struct drm_framebuffer *fb;
  
 -      ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 -      if (ret)
 -              goto out;
 +      if (dev_priv->fbdev == NULL)
 +              return NULL;
  
 -      /* Offset into the new buffer for cases of shared fbs between CRTCs */
 -      offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
 +      obj = dev_priv->fbdev->ifb.obj;
 +      if (obj == NULL)
 +              return NULL;
  
 -      ret = BEGIN_LP_RING(6);
 -      if (ret)
 -              goto out;
 +      fb = &dev_priv->fbdev->ifb.base;
 +      if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
 +                                                             fb->bits_per_pixel))
 +              return NULL;
  
 -      /* Can't queue multiple flips, so wait for the previous
 -       * one to finish before executing the next.
 -       */
 -      if (intel_crtc->plane)
 -              flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 -      else
 -              flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
 -      OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
 -      OUT_RING(MI_NOOP);
 -      OUT_RING(MI_DISPLAY_FLIP |
 -               MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 -      OUT_RING(fb->pitches[0]);
 -      OUT_RING(obj->gtt_offset + offset);
 -      OUT_RING(0); /* aux display base address, unused */
 -      ADVANCE_LP_RING();
 -out:
 -      return ret;
 +      if (obj->base.size < mode->vdisplay * fb->pitches[0])
 +              return NULL;
 +
 +      return fb;
  }
  
 -static int intel_gen3_queue_flip(struct drm_device *dev,
 -                               struct drm_crtc *crtc,
 -                               struct drm_framebuffer *fb,
 -                               struct drm_i915_gem_object *obj)
 +bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 +                              struct drm_connector *connector,
 +                              struct drm_display_mode *mode,
 +                              struct intel_load_detect_pipe *old)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      unsigned long offset;
 -      u32 flip_mask;
 -      int ret;
 +      struct intel_crtc *intel_crtc;
 +      struct drm_crtc *possible_crtc;
 +      struct drm_encoder *encoder = &intel_encoder->base;
 +      struct drm_crtc *crtc = NULL;
 +      struct drm_device *dev = encoder->dev;
 +      struct drm_framebuffer *old_fb;
 +      int i = -1;
  
 -      ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 -      if (ret)
 -              goto out;
 +      DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 +                    connector->base.id, drm_get_connector_name(connector),
 +                    encoder->base.id, drm_get_encoder_name(encoder));
  
 -      /* Offset into the new buffer for cases of shared fbs between CRTCs */
 -      offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
 +      /*
 +       * Algorithm gets a little messy:
 +       *
 +       *   - if the connector already has an assigned crtc, use it (but make
 +       *     sure it's on first)
 +       *
 +       *   - try to find the first unused crtc that can drive this connector,
 +       *     and use that if we find one
 +       */
  
 -      ret = BEGIN_LP_RING(6);
 -      if (ret)
 -              goto out;
 +      /* See if we already have a CRTC for this connector */
 +      if (encoder->crtc) {
 +              crtc = encoder->crtc;
  
 -      if (intel_crtc->plane)
 -              flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 -      else
 -              flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
 -      OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
 -      OUT_RING(MI_NOOP);
 -      OUT_RING(MI_DISPLAY_FLIP_I915 |
 -               MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 -      OUT_RING(fb->pitches[0]);
 -      OUT_RING(obj->gtt_offset + offset);
 -      OUT_RING(MI_NOOP);
 +              intel_crtc = to_intel_crtc(crtc);
 +              old->dpms_mode = intel_crtc->dpms_mode;
 +              old->load_detect_temp = false;
  
 -      ADVANCE_LP_RING();
 -out:
 -      return ret;
 -}
 +              /* Make sure the crtc and connector are running */
 +              if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
 +                      struct drm_encoder_helper_funcs *encoder_funcs;
 +                      struct drm_crtc_helper_funcs *crtc_funcs;
  
 -static int intel_gen4_queue_flip(struct drm_device *dev,
 -                               struct drm_crtc *crtc,
 -                               struct drm_framebuffer *fb,
 -                               struct drm_i915_gem_object *obj)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      uint32_t pf, pipesrc;
 -      int ret;
 +                      crtc_funcs = crtc->helper_private;
 +                      crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
  
 -      ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 -      if (ret)
 -              goto out;
 +                      encoder_funcs = encoder->helper_private;
 +                      encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
 +              }
  
 -      ret = BEGIN_LP_RING(4);
 -      if (ret)
 -              goto out;
 +              return true;
 +      }
  
 -      /* i965+ uses the linear or tiled offsets from the
 -       * Display Registers (which do not change across a page-flip)
 -       * so we need only reprogram the base address.
 -       */
 -      OUT_RING(MI_DISPLAY_FLIP |
 -               MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 -      OUT_RING(fb->pitches[0]);
 -      OUT_RING(obj->gtt_offset | obj->tiling_mode);
 +      /* Find an unused one (if possible) */
 +      list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
 +              i++;
 +              if (!(encoder->possible_crtcs & (1 << i)))
 +                      continue;
 +              if (!possible_crtc->enabled) {
 +                      crtc = possible_crtc;
 +                      break;
 +              }
 +      }
  
 -      /* XXX Enabling the panel-fitter across page-flip is so far
 -       * untested on non-native modes, so ignore it for now.
 -       * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
 +      /*
 +       * If we didn't find an unused CRTC, don't use any.
         */
 -      pf = 0;
 -      pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 -      OUT_RING(pf | pipesrc);
 -      ADVANCE_LP_RING();
 -out:
 -      return ret;
 -}
 +      if (!crtc) {
 +              DRM_DEBUG_KMS("no pipe available for load-detect\n");
 +              return false;
 +      }
  
 -static int intel_gen6_queue_flip(struct drm_device *dev,
 -                               struct drm_crtc *crtc,
 -                               struct drm_framebuffer *fb,
 -                               struct drm_i915_gem_object *obj)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      uint32_t pf, pipesrc;
 -      int ret;
 +      encoder->crtc = crtc;
 +      connector->encoder = encoder;
  
 -      ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 -      if (ret)
 -              goto out;
 +      intel_crtc = to_intel_crtc(crtc);
 +      old->dpms_mode = intel_crtc->dpms_mode;
 +      old->load_detect_temp = true;
 +      old->release_fb = NULL;
  
 -      ret = BEGIN_LP_RING(4);
 -      if (ret)
 -              goto out;
 +      if (!mode)
 +              mode = &load_detect_mode;
  
 -      OUT_RING(MI_DISPLAY_FLIP |
 -               MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 -      OUT_RING(fb->pitches[0] | obj->tiling_mode);
 -      OUT_RING(obj->gtt_offset);
 +      old_fb = crtc->fb;
  
 -      /* Contrary to the suggestions in the documentation,
 -       * "Enable Panel Fitter" does not seem to be required when page
 -       * flipping with a non-native mode, and worse causes a normal
 -       * modeset to fail.
 -       * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
 +      /* We need a framebuffer large enough to accommodate all accesses
 +       * that the plane may generate whilst we perform load detection.
 +       * We can not rely on the fbcon either being present (we get called
 +       * during its initialisation to detect all boot displays, or it may
 +       * not even exist) or that it is large enough to satisfy the
 +       * requested mode.
         */
 -      pf = 0;
 -      pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 -      OUT_RING(pf | pipesrc);
 -      ADVANCE_LP_RING();
 -out:
 -      return ret;
 +      crtc->fb = mode_fits_in_fbdev(dev, mode);
 +      if (crtc->fb == NULL) {
 +              DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
 +              crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
 +              old->release_fb = crtc->fb;
 +      } else
 +              DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
 +      if (IS_ERR(crtc->fb)) {
 +              DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
 +              crtc->fb = old_fb;
 +              return false;
 +      }
 +
 +      if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
 +              DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 +              if (old->release_fb)
 +                      old->release_fb->funcs->destroy(old->release_fb);
 +              crtc->fb = old_fb;
 +              return false;
 +      }
 +
 +      /* let the connector get through one full cycle before testing */
 +      intel_wait_for_vblank(dev, intel_crtc->pipe);
 +
 +      return true;
  }
  
 -/*
 - * On gen7 we currently use the blit ring because (in early silicon at least)
 - * the render ring doesn't give us interrpts for page flip completion, which
 - * means clients will hang after the first flip is queued.  Fortunately the
 - * blit ring generates interrupts properly, so use it instead.
 - */
 -static int intel_gen7_queue_flip(struct drm_device *dev,
 -                               struct drm_crtc *crtc,
 -                               struct drm_framebuffer *fb,
 -                               struct drm_i915_gem_object *obj)
 +void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
 +                                  struct drm_connector *connector,
 +                                  struct intel_load_detect_pipe *old)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
 -      int ret;
 +      struct drm_encoder *encoder = &intel_encoder->base;
 +      struct drm_device *dev = encoder->dev;
 +      struct drm_crtc *crtc = encoder->crtc;
 +      struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 +      struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
  
 -      ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 -      if (ret)
 -              goto out;
 +      DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 +                    connector->base.id, drm_get_connector_name(connector),
 +                    encoder->base.id, drm_get_encoder_name(encoder));
  
 -      ret = intel_ring_begin(ring, 4);
 -      if (ret)
 -              goto out;
 +      if (old->load_detect_temp) {
 +              connector->encoder = NULL;
 +              drm_helper_disable_unused_functions(dev);
  
 -      intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
 -      intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
 -      intel_ring_emit(ring, (obj->gtt_offset));
 -      intel_ring_emit(ring, (MI_NOOP));
 -      intel_ring_advance(ring);
 -out:
 -      return ret;
 -}
 +              if (old->release_fb)
 +                      old->release_fb->funcs->destroy(old->release_fb);
  
 -static int intel_default_queue_flip(struct drm_device *dev,
 -                                  struct drm_crtc *crtc,
 -                                  struct drm_framebuffer *fb,
 -                                  struct drm_i915_gem_object *obj)
 -{
 -      return -ENODEV;
 +              return;
 +      }
 +
 +      /* Switch crtc and encoder back off if necessary */
 +      if (old->dpms_mode != DRM_MODE_DPMS_ON) {
 +              encoder_funcs->dpms(encoder, old->dpms_mode);
 +              crtc_funcs->dpms(crtc, old->dpms_mode);
 +      }
  }
  
 -static int intel_crtc_page_flip(struct drm_crtc *crtc,
 -                              struct drm_framebuffer *fb,
 -                              struct drm_pending_vblank_event *event)
 +/* Returns the clock of the currently programmed mode of the given pipe. */
 +static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
  {
 -      struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_framebuffer *intel_fb;
 -      struct drm_i915_gem_object *obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      struct intel_unpin_work *work;
 -      unsigned long flags;
 -      int ret;
 -
 -      work = kzalloc(sizeof *work, GFP_KERNEL);
 -      if (work == NULL)
 -              return -ENOMEM;
 -
 -      work->event = event;
 -      work->dev = crtc->dev;
 -      intel_fb = to_intel_framebuffer(crtc->fb);
 -      work->old_fb_obj = intel_fb->obj;
 -      INIT_WORK(&work->work, intel_unpin_work_fn);
 -
 -      ret = drm_vblank_get(dev, intel_crtc->pipe);
 -      if (ret)
 -              goto free_work;
 +      int pipe = intel_crtc->pipe;
 +      u32 dpll = I915_READ(DPLL(pipe));
 +      u32 fp;
 +      intel_clock_t clock;
  
 -      /* We borrow the event spin lock for protecting unpin_work */
 -      spin_lock_irqsave(&dev->event_lock, flags);
 -      if (intel_crtc->unpin_work) {
 -              spin_unlock_irqrestore(&dev->event_lock, flags);
 -              kfree(work);
 -              drm_vblank_put(dev, intel_crtc->pipe);
 +      if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
 +              fp = I915_READ(FP0(pipe));
 +      else
 +              fp = I915_READ(FP1(pipe));
  
 -              DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 -              return -EBUSY;
 +      clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
 +      if (IS_PINEVIEW(dev)) {
 +              clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
 +              clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
 +      } else {
 +              clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
 +              clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }
 -      intel_crtc->unpin_work = work;
 -      spin_unlock_irqrestore(&dev->event_lock, flags);
  
 -      intel_fb = to_intel_framebuffer(fb);
 -      obj = intel_fb->obj;
 +      if (!IS_GEN2(dev)) {
 +              if (IS_PINEVIEW(dev))
 +                      clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
 +                              DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
 +              else
 +                      clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
 +                             DPLL_FPA01_P1_POST_DIV_SHIFT);
  
 -      mutex_lock(&dev->struct_mutex);
 +              switch (dpll & DPLL_MODE_MASK) {
 +              case DPLLB_MODE_DAC_SERIAL:
 +                      clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
 +                              5 : 10;
 +                      break;
 +              case DPLLB_MODE_LVDS:
 +                      clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
 +                              7 : 14;
 +                      break;
 +              default:
 +                      DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
 +                                "mode\n", (int)(dpll & DPLL_MODE_MASK));
 +                      return 0;
 +              }
  
 -      /* Reference the objects for the scheduled work. */
 -      drm_gem_object_reference(&work->old_fb_obj->base);
 -      drm_gem_object_reference(&obj->base);
 +              /* XXX: Handle the 100Mhz refclk */
 +              intel_clock(dev, 96000, &clock);
 +      } else {
 +              bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
  
 -      crtc->fb = fb;
 +              if (is_lvds) {
 +                      clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
 +                                     DPLL_FPA01_P1_POST_DIV_SHIFT);
 +                      clock.p2 = 14;
  
 -      work->pending_flip_obj = obj;
 +                      if ((dpll & PLL_REF_INPUT_MASK) ==
 +                          PLLB_REF_INPUT_SPREADSPECTRUMIN) {
 +                              /* XXX: might not be 66MHz */
 +                              intel_clock(dev, 66000, &clock);
 +                      } else
 +                              intel_clock(dev, 48000, &clock);
 +              } else {
 +                      if (dpll & PLL_P1_DIVIDE_BY_TWO)
 +                              clock.p1 = 2;
 +                      else {
 +                              clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
 +                                          DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
 +                      }
 +                      if (dpll & PLL_P2_DIVIDE_BY_4)
 +                              clock.p2 = 4;
 +                      else
 +                              clock.p2 = 2;
  
 -      work->enable_stall_check = true;
 +                      intel_clock(dev, 48000, &clock);
 +              }
 +      }
  
 -      /* Block clients from rendering to the new back buffer until
 -       * the flip occurs and the object is no longer visible.
 +      /* XXX: It would be nice to validate the clocks, but we can't reuse
 +       * i830PllIsValid() because it relies on the xf86_config connector
 +       * configuration being accurate, which it isn't necessarily.
         */
 -      atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
  
 -      ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
 -      if (ret)
 -              goto cleanup_pending;
 +      return clock.dot;
 +}
  
 -      intel_disable_fbc(dev);
 -      mutex_unlock(&dev->struct_mutex);
 +/** Returns the currently programmed mode of the given pipe. */
 +struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 +                                           struct drm_crtc *crtc)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      struct drm_display_mode *mode;
 +      int htot = I915_READ(HTOTAL(pipe));
 +      int hsync = I915_READ(HSYNC(pipe));
 +      int vtot = I915_READ(VTOTAL(pipe));
 +      int vsync = I915_READ(VSYNC(pipe));
 +
 +      mode = kzalloc(sizeof(*mode), GFP_KERNEL);
 +      if (!mode)
 +              return NULL;
 +
 +      mode->clock = intel_crtc_clock_get(dev, crtc);
 +      mode->hdisplay = (htot & 0xffff) + 1;
 +      mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
 +      mode->hsync_start = (hsync & 0xffff) + 1;
 +      mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
 +      mode->vdisplay = (vtot & 0xffff) + 1;
 +      mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
 +      mode->vsync_start = (vsync & 0xffff) + 1;
 +      mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
  
 -      trace_i915_flip_request(intel_crtc->plane, obj);
 +      drm_mode_set_name(mode);
 +      drm_mode_set_crtcinfo(mode, 0);
  
 -      return 0;
 +      return mode;
 +}
  
 -cleanup_pending:
 -      atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 -      drm_gem_object_unreference(&work->old_fb_obj->base);
 -      drm_gem_object_unreference(&obj->base);
 -      mutex_unlock(&dev->struct_mutex);
 +#define GPU_IDLE_TIMEOUT 500 /* ms */
  
 -      spin_lock_irqsave(&dev->event_lock, flags);
 -      intel_crtc->unpin_work = NULL;
 -      spin_unlock_irqrestore(&dev->event_lock, flags);
 +/* When this timer fires, we've been idle for awhile */
 +static void intel_gpu_idle_timer(unsigned long arg)
 +{
 +      struct drm_device *dev = (struct drm_device *)arg;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
  
 -      drm_vblank_put(dev, intel_crtc->pipe);
 -free_work:
 -      kfree(work);
 +      if (!list_empty(&dev_priv->mm.active_list)) {
 +              /* Still processing requests, so just re-arm the timer. */
 +              mod_timer(&dev_priv->idle_timer, jiffies +
 +                        msecs_to_jiffies(GPU_IDLE_TIMEOUT));
 +              return;
 +      }
  
 -      return ret;
 +      dev_priv->busy = false;
 +      queue_work(dev_priv->wq, &dev_priv->idle_work);
  }
  
 -static void intel_sanitize_modesetting(struct drm_device *dev,
 -                                     int pipe, int plane)
 +#define CRTC_IDLE_TIMEOUT 1000 /* ms */
 +
 +static void intel_crtc_idle_timer(unsigned long arg)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 reg, val;
 +      struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
 +      struct drm_crtc *crtc = &intel_crtc->base;
 +      drm_i915_private_t *dev_priv = crtc->dev->dev_private;
 +      struct intel_framebuffer *intel_fb;
  
 -      /* Clear any frame start delays used for debugging left by the BIOS */
 -      for_each_pipe(pipe) {
 -              reg = PIPECONF(pipe);
 -              I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 +      intel_fb = to_intel_framebuffer(crtc->fb);
 +      if (intel_fb && intel_fb->obj->active) {
 +              /* The framebuffer is still being accessed by the GPU. */
 +              mod_timer(&intel_crtc->idle_timer, jiffies +
 +                        msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
 +              return;
        }
  
 +      intel_crtc->busy = false;
 +      queue_work(dev_priv->wq, &dev_priv->idle_work);
 +}
 +
 +static void intel_increase_pllclock(struct drm_crtc *crtc)
 +{
 +      struct drm_device *dev = crtc->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int pipe = intel_crtc->pipe;
 +      int dpll_reg = DPLL(pipe);
 +      int dpll;
 +
        if (HAS_PCH_SPLIT(dev))
                return;
  
 -      /* Who knows what state these registers were left in by the BIOS or
 -       * grub?
 -       *
 -       * If we leave the registers in a conflicting state (e.g. with the
 -       * display plane reading from the other pipe than the one we intend
 -       * to use) then when we attempt to teardown the active mode, we will
 -       * not disable the pipes and planes in the correct order -- leaving
 -       * a plane reading from a disabled pipe and possibly leading to
 -       * undefined behaviour.
 -       */
 +      if (!dev_priv->lvds_downclock_avail)
 +              return;
  
 -      reg = DSPCNTR(plane);
 -      val = I915_READ(reg);
 +      dpll = I915_READ(dpll_reg);
 +      if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
 +              DRM_DEBUG_DRIVER("upclocking LVDS\n");
  
 -      if ((val & DISPLAY_PLANE_ENABLE) == 0)
 -              return;
 -      if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
 -              return;
 +              assert_panel_unlocked(dev_priv, pipe);
  
 -      /* This display plane is active and attached to the other CPU pipe. */
 -      pipe = !pipe;
 +              dpll &= ~DISPLAY_RATE_SELECT_FPA1;
 +              I915_WRITE(dpll_reg, dpll);
 +              intel_wait_for_vblank(dev, pipe);
  
 -      /* Disable the plane and wait for it to stop reading from the pipe. */
 -      intel_disable_plane(dev_priv, plane, pipe);
 -      intel_disable_pipe(dev_priv, pipe);
 +              dpll = I915_READ(dpll_reg);
 +              if (dpll & DISPLAY_RATE_SELECT_FPA1)
 +                      DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
 +      }
 +
 +      /* Schedule downclock */
 +      mod_timer(&intel_crtc->idle_timer, jiffies +
 +                msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
  }
  
 -static void intel_crtc_reset(struct drm_crtc *crtc)
 +static void intel_decrease_pllclock(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-       int dpll_reg = DPLL(pipe);
-       int dpll = I915_READ(dpll_reg);
  
 -      /* Reset flags back to the 'unknown' status so that they
 -       * will be correctly set on the initial modeset.
 -       */
 -      intel_crtc->dpms_mode = -1;
 +      if (HAS_PCH_SPLIT(dev))
 +              return;
  
 -      /* We need to fix up any BIOS configuration that conflicts with
 -       * our expectations.
 +      if (!dev_priv->lvds_downclock_avail)
 +              return;
 +
 +      /*
 +       * Since this is called by a timer, we should never get here in
 +       * the manual case.
         */
 -      intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
 -}
 +      if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
++              int pipe = intel_crtc->pipe;
++              int dpll_reg = DPLL(pipe);
++              int dpll;
 -static struct drm_crtc_helper_funcs intel_helper_funcs = {
 -      .dpms = intel_crtc_dpms,
 -      .mode_fixup = intel_crtc_mode_fixup,
 -      .mode_set = intel_crtc_mode_set,
 -      .mode_set_base = intel_pipe_set_base,
 -      .mode_set_base_atomic = intel_pipe_set_base_atomic,
 -      .load_lut = intel_crtc_load_lut,
 -      .disable = intel_crtc_disable,
 -};
 +              DRM_DEBUG_DRIVER("downclocking LVDS\n");
  
 -static const struct drm_crtc_funcs intel_crtc_funcs = {
 -      .reset = intel_crtc_reset,
 -      .cursor_set = intel_crtc_cursor_set,
 -      .cursor_move = intel_crtc_cursor_move,
 -      .gamma_set = intel_crtc_gamma_set,
 -      .set_config = drm_crtc_helper_set_config,
 -      .destroy = intel_crtc_destroy,
 -      .page_flip = intel_crtc_page_flip,
 -};
 +              assert_panel_unlocked(dev_priv, pipe);
  
 -static void intel_crtc_init(struct drm_device *dev, int pipe)
++              dpll = I915_READ(dpll_reg);
 +              dpll |= DISPLAY_RATE_SELECT_FPA1;
 +              I915_WRITE(dpll_reg, dpll);
 +              intel_wait_for_vblank(dev, pipe);
 +              dpll = I915_READ(dpll_reg);
 +              if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
 +                      DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
 +      }
 +
 +}
 +
 +/**
 + * intel_idle_update - adjust clocks for idleness
 + * @work: work struct
 + *
 + * Either the GPU or display (or both) went idle.  Check the busy status
 + * here and adjust the CRTC and GPU clocks as necessary.
 + */
 +static void intel_idle_update(struct work_struct *work)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 +                                                  idle_work);
 +      struct drm_device *dev = dev_priv->dev;
 +      struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
 -      int i;
  
 -      intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
 -      if (intel_crtc == NULL)
 +      if (!i915_powersave)
                return;
  
 -      drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
 +      mutex_lock(&dev->struct_mutex);
  
 -      drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
 -      for (i = 0; i < 256; i++) {
 -              intel_crtc->lut_r[i] = i;
 -              intel_crtc->lut_g[i] = i;
 -              intel_crtc->lut_b[i] = i;
 -      }
 +      i915_update_gfx_val(dev_priv);
  
 -      /* Swap pipes & planes for FBC on pre-965 */
 -      intel_crtc->pipe = pipe;
 -      intel_crtc->plane = pipe;
 -      if (IS_MOBILE(dev) && IS_GEN3(dev)) {
 -              DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
 -              intel_crtc->plane = !pipe;
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 +              /* Skip inactive CRTCs */
 +              if (!crtc->fb)
 +                      continue;
 +
 +              intel_crtc = to_intel_crtc(crtc);
 +              if (!intel_crtc->busy)
 +                      intel_decrease_pllclock(crtc);
        }
  
 -      BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
 -             dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
 -      dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 -      dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
  
 -      intel_crtc_reset(&intel_crtc->base);
 -      intel_crtc->active = true; /* force the pipe off on setup_init_config */
 -      intel_crtc->bpp = 24; /* default for pre-Ironlake */
 +      mutex_unlock(&dev->struct_mutex);
 +}
  
 -      if (HAS_PCH_SPLIT(dev)) {
 -              if (pipe == 2 && IS_IVYBRIDGE(dev))
 -                      intel_crtc->no_pll = true;
 -              intel_helper_funcs.prepare = ironlake_crtc_prepare;
 -              intel_helper_funcs.commit = ironlake_crtc_commit;
 -      } else {
 -              intel_helper_funcs.prepare = i9xx_crtc_prepare;
 -              intel_helper_funcs.commit = i9xx_crtc_commit;
 -      }
 +/**
 + * intel_mark_busy - mark the GPU and possibly the display busy
 + * @dev: drm device
 + * @obj: object we're operating on
 + *
 + * Callers can use this function to indicate that the GPU is busy processing
 + * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
 + * buffer), we'll also mark the display as busy, so we know to increase its
 + * clock frequency.
 + */
 +void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_crtc *crtc = NULL;
 +      struct intel_framebuffer *intel_fb;
 +      struct intel_crtc *intel_crtc;
  
 -      drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 +      if (!drm_core_check_feature(dev, DRIVER_MODESET))
 +              return;
  
 -      intel_crtc->busy = false;
 +      if (!dev_priv->busy)
 +              dev_priv->busy = true;
 +      else
 +              mod_timer(&dev_priv->idle_timer, jiffies +
 +                        msecs_to_jiffies(GPU_IDLE_TIMEOUT));
  
 -      setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
 -                  (unsigned long)intel_crtc);
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 +              if (!crtc->fb)
 +                      continue;
 +
 +              intel_crtc = to_intel_crtc(crtc);
 +              intel_fb = to_intel_framebuffer(crtc->fb);
 +              if (intel_fb->obj == obj) {
 +                      if (!intel_crtc->busy) {
 +                              /* Non-busy -> busy, upclock */
 +                              intel_increase_pllclock(crtc);
 +                              intel_crtc->busy = true;
 +                      } else {
 +                              /* Busy -> busy, put off timer */
 +                              mod_timer(&intel_crtc->idle_timer, jiffies +
 +                                        msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
 +                      }
 +              }
 +      }
  }
  
 -int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 -                              struct drm_file *file)
 +static void intel_crtc_destroy(struct drm_crtc *crtc)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
 -      struct drm_mode_object *drmmode_obj;
 -      struct intel_crtc *crtc;
 -
 -      if (!dev_priv) {
 -              DRM_ERROR("called with no initialization\n");
 -              return -EINVAL;
 -      }
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      struct drm_device *dev = crtc->dev;
 +      struct intel_unpin_work *work;
 +      unsigned long flags;
  
 -      drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
 -                      DRM_MODE_OBJECT_CRTC);
 +      spin_lock_irqsave(&dev->event_lock, flags);
 +      work = intel_crtc->unpin_work;
 +      intel_crtc->unpin_work = NULL;
 +      spin_unlock_irqrestore(&dev->event_lock, flags);
  
 -      if (!drmmode_obj) {
 -              DRM_ERROR("no such CRTC id\n");
 -              return -EINVAL;
 +      if (work) {
 +              cancel_work_sync(&work->work);
 +              kfree(work);
        }
  
 -      crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
 -      pipe_from_crtc_id->pipe = crtc->pipe;
 +      drm_crtc_cleanup(crtc);
  
 -      return 0;
 +      kfree(intel_crtc);
  }
  
 -static int intel_encoder_clones(struct drm_device *dev, int type_mask)
 +static void intel_unpin_work_fn(struct work_struct *__work)
  {
 -      struct intel_encoder *encoder;
 -      int index_mask = 0;
 -      int entry = 0;
 +      struct intel_unpin_work *work =
 +              container_of(__work, struct intel_unpin_work, work);
  
 -      list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
 -              if (type_mask & encoder->clone_mask)
 -                      index_mask |= (1 << entry);
 -              entry++;
 -      }
 +      mutex_lock(&work->dev->struct_mutex);
 +      intel_unpin_fb_obj(work->old_fb_obj);
 +      drm_gem_object_unreference(&work->pending_flip_obj->base);
 +      drm_gem_object_unreference(&work->old_fb_obj->base);
  
 -      return index_mask;
 +      intel_update_fbc(work->dev);
 +      mutex_unlock(&work->dev->struct_mutex);
 +      kfree(work);
  }
  
 -static bool has_edp_a(struct drm_device *dev)
 +static void do_intel_finish_page_flip(struct drm_device *dev,
 +                                    struct drm_crtc *crtc)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      if (!IS_MOBILE(dev))
 -              return false;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      struct intel_unpin_work *work;
 +      struct drm_i915_gem_object *obj;
 +      struct drm_pending_vblank_event *e;
 +      struct timeval tnow, tvbl;
 +      unsigned long flags;
  
 -      if ((I915_READ(DP_A) & DP_DETECTED) == 0)
 -              return false;
 +      /* Ignore early vblank irqs */
 +      if (intel_crtc == NULL)
 +              return;
  
 -      if (IS_GEN5(dev) &&
 -          (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
 -              return false;
 +      do_gettimeofday(&tnow);
  
 -      return true;
 -}
 +      spin_lock_irqsave(&dev->event_lock, flags);
 +      work = intel_crtc->unpin_work;
 +      if (work == NULL || !work->pending) {
 +              spin_unlock_irqrestore(&dev->event_lock, flags);
 +              return;
 +      }
  
 -static void intel_setup_outputs(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_encoder *encoder;
 -      bool dpd_is_edp = false;
 -      bool has_lvds;
 +      intel_crtc->unpin_work = NULL;
  
 -      has_lvds = intel_lvds_init(dev);
 -      if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
 -              /* disable the panel fitter on everything but LVDS */
 -              I915_WRITE(PFIT_CONTROL, 0);
 -      }
 +      if (work->event) {
 +              e = work->event;
 +              e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
  
 -      if (HAS_PCH_SPLIT(dev)) {
 -              dpd_is_edp = intel_dpd_is_edp(dev);
 +              /* Called before vblank count and timestamps have
 +               * been updated for the vblank interval of flip
 +               * completion? Need to increment vblank count and
 +               * add one videorefresh duration to returned timestamp
 +               * to account for this. We assume this happened if we
 +               * get called over 0.9 frame durations after the last
 +               * timestamped vblank.
 +               *
 +               * This calculation can not be used with vrefresh rates
 +               * below 5Hz (10Hz to be on the safe side) without
 +               * promoting to 64 integers.
 +               */
 +              if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
 +                  9 * crtc->framedur_ns) {
 +                      e->event.sequence++;
 +                      tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
 +                                           crtc->framedur_ns);
 +              }
  
 -              if (has_edp_a(dev))
 -                      intel_dp_init(dev, DP_A);
 +              e->event.tv_sec = tvbl.tv_sec;
 +              e->event.tv_usec = tvbl.tv_usec;
  
 -              if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
 -                      intel_dp_init(dev, PCH_DP_D);
 +              list_add_tail(&e->base.link,
 +                            &e->base.file_priv->event_list);
 +              wake_up_interruptible(&e->base.file_priv->event_wait);
        }
  
 -      intel_crt_init(dev);
 -
 -      if (HAS_PCH_SPLIT(dev)) {
 -              int found;
 +      drm_vblank_put(dev, intel_crtc->pipe);
  
 -              if (I915_READ(HDMIB) & PORT_DETECTED) {
 -                      /* PCH SDVOB multiplex with HDMIB */
 -                      found = intel_sdvo_init(dev, PCH_SDVOB);
 -                      if (!found)
 -                              intel_hdmi_init(dev, HDMIB);
 -                      if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
 -                              intel_dp_init(dev, PCH_DP_B);
 -              }
 +      spin_unlock_irqrestore(&dev->event_lock, flags);
  
 -              if (I915_READ(HDMIC) & PORT_DETECTED)
 -                      intel_hdmi_init(dev, HDMIC);
 +      obj = work->old_fb_obj;
  
 -              if (I915_READ(HDMID) & PORT_DETECTED)
 -                      intel_hdmi_init(dev, HDMID);
 +      atomic_clear_mask(1 << intel_crtc->plane,
 +                        &obj->pending_flip.counter);
 +      if (atomic_read(&obj->pending_flip) == 0)
 +              wake_up(&dev_priv->pending_flip_queue);
  
 -              if (I915_READ(PCH_DP_C) & DP_DETECTED)
 -                      intel_dp_init(dev, PCH_DP_C);
 +      schedule_work(&work->work);
  
 -              if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
 -                      intel_dp_init(dev, PCH_DP_D);
 +      trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
 +}
  
 -      } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
 -              bool found = false;
 +void intel_finish_page_flip(struct drm_device *dev, int pipe)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  
 -              if (I915_READ(SDVOB) & SDVO_DETECTED) {
 -                      DRM_DEBUG_KMS("probing SDVOB\n");
 -                      found = intel_sdvo_init(dev, SDVOB);
 -                      if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
 -                              DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
 -                              intel_hdmi_init(dev, SDVOB);
 -                      }
 +      do_intel_finish_page_flip(dev, crtc);
 +}
  
 -                      if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
 -                              DRM_DEBUG_KMS("probing DP_B\n");
 -                              intel_dp_init(dev, DP_B);
 -                      }
 -              }
 +void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
  
 -              /* Before G4X SDVOC doesn't have its own detect register */
 +      do_intel_finish_page_flip(dev, crtc);
 +}
  
 -              if (I915_READ(SDVOB) & SDVO_DETECTED) {
 -                      DRM_DEBUG_KMS("probing SDVOC\n");
 -                      found = intel_sdvo_init(dev, SDVOC);
 -              }
 +void intel_prepare_page_flip(struct drm_device *dev, int plane)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc =
 +              to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
 +      unsigned long flags;
  
 -              if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
 +      spin_lock_irqsave(&dev->event_lock, flags);
 +      if (intel_crtc->unpin_work) {
 +              if ((++intel_crtc->unpin_work->pending) > 1)
 +                      DRM_ERROR("Prepared flip multiple times\n");
 +      } else {
 +              DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
 +      }
 +      spin_unlock_irqrestore(&dev->event_lock, flags);
 +}
  
 -                      if (SUPPORTS_INTEGRATED_HDMI(dev)) {
 -                              DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
 -                              intel_hdmi_init(dev, SDVOC);
 -                      }
 -                      if (SUPPORTS_INTEGRATED_DP(dev)) {
 -                              DRM_DEBUG_KMS("probing DP_C\n");
 -                              intel_dp_init(dev, DP_C);
 -                      }
 -              }
 +static int intel_gen2_queue_flip(struct drm_device *dev,
 +                               struct drm_crtc *crtc,
 +                               struct drm_framebuffer *fb,
 +                               struct drm_i915_gem_object *obj)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      unsigned long offset;
 +      u32 flip_mask;
 +      int ret;
  
 -              if (SUPPORTS_INTEGRATED_DP(dev) &&
 -                  (I915_READ(DP_D) & DP_DETECTED)) {
 -                      DRM_DEBUG_KMS("probing DP_D\n");
 -                      intel_dp_init(dev, DP_D);
 -              }
 -      } else if (IS_GEN2(dev))
 -              intel_dvo_init(dev);
 +      ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 +      if (ret)
 +              goto err;
  
 -      if (SUPPORTS_TV(dev))
 -              intel_tv_init(dev);
 +      /* Offset into the new buffer for cases of shared fbs between CRTCs */
 +      offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
  
 -      list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
 -              encoder->base.possible_crtcs = encoder->crtc_mask;
 -              encoder->base.possible_clones =
 -                      intel_encoder_clones(dev, encoder->clone_mask);
 -      }
 +      ret = BEGIN_LP_RING(6);
 +      if (ret)
 +              goto err_unpin;
  
 -      /* disable all the possible outputs/crtcs before entering KMS mode */
 -      drm_helper_disable_unused_functions(dev);
 +      /* Can't queue multiple flips, so wait for the previous
 +       * one to finish before executing the next.
 +       */
 +      if (intel_crtc->plane)
 +              flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 +      else
 +              flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
 +      OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
 +      OUT_RING(MI_NOOP);
 +      OUT_RING(MI_DISPLAY_FLIP |
 +               MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 +      OUT_RING(fb->pitches[0]);
 +      OUT_RING(obj->gtt_offset + offset);
 +      OUT_RING(0); /* aux display base address, unused */
 +      ADVANCE_LP_RING();
 +      return 0;
  
 -      if (HAS_PCH_SPLIT(dev))
 -              ironlake_init_pch_refclk(dev);
 +err_unpin:
 +      intel_unpin_fb_obj(obj);
 +err:
 +      return ret;
  }
  
 -static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 +static int intel_gen3_queue_flip(struct drm_device *dev,
 +                               struct drm_crtc *crtc,
 +                               struct drm_framebuffer *fb,
 +                               struct drm_i915_gem_object *obj)
  {
 -      struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      unsigned long offset;
 +      u32 flip_mask;
 +      int ret;
  
 -      drm_framebuffer_cleanup(fb);
 -      drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
 +      ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 +      if (ret)
 +              goto err;
  
 -      kfree(intel_fb);
 -}
 +      /* Offset into the new buffer for cases of shared fbs between CRTCs */
 +      offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
  
 -static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
 -                                              struct drm_file *file,
 -                                              unsigned int *handle)
 -{
 -      struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 -      struct drm_i915_gem_object *obj = intel_fb->obj;
 +      ret = BEGIN_LP_RING(6);
 +      if (ret)
 +              goto err_unpin;
  
 -      return drm_gem_handle_create(file, &obj->base, handle);
 -}
 +      if (intel_crtc->plane)
 +              flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 +      else
 +              flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
 +      OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
 +      OUT_RING(MI_NOOP);
 +      OUT_RING(MI_DISPLAY_FLIP_I915 |
 +               MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 +      OUT_RING(fb->pitches[0]);
 +      OUT_RING(obj->gtt_offset + offset);
 +      OUT_RING(MI_NOOP);
  
 -static const struct drm_framebuffer_funcs intel_fb_funcs = {
 -      .destroy = intel_user_framebuffer_destroy,
 -      .create_handle = intel_user_framebuffer_create_handle,
 -};
 +      ADVANCE_LP_RING();
 +      return 0;
  
 -int intel_framebuffer_init(struct drm_device *dev,
 -                         struct intel_framebuffer *intel_fb,
 -                         struct drm_mode_fb_cmd2 *mode_cmd,
 -                         struct drm_i915_gem_object *obj)
 +err_unpin:
 +      intel_unpin_fb_obj(obj);
 +err:
 +      return ret;
 +}
 +
 +static int intel_gen4_queue_flip(struct drm_device *dev,
 +                               struct drm_crtc *crtc,
 +                               struct drm_framebuffer *fb,
 +                               struct drm_i915_gem_object *obj)
  {
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      uint32_t pf, pipesrc;
        int ret;
  
 -      if (obj->tiling_mode == I915_TILING_Y)
 -              return -EINVAL;
 -
 -      if (mode_cmd->pitches[0] & 63)
 -              return -EINVAL;
 +      ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 +      if (ret)
 +              goto err;
  
 -      switch (mode_cmd->pixel_format) {
 -      case DRM_FORMAT_RGB332:
 -      case DRM_FORMAT_RGB565:
 -      case DRM_FORMAT_XRGB8888:
 -      case DRM_FORMAT_XBGR8888:
 -      case DRM_FORMAT_ARGB8888:
 -      case DRM_FORMAT_XRGB2101010:
 -      case DRM_FORMAT_ARGB2101010:
 -              /* RGB formats are common across chipsets */
 -              break;
 -      case DRM_FORMAT_YUYV:
 -      case DRM_FORMAT_UYVY:
 -      case DRM_FORMAT_YVYU:
 -      case DRM_FORMAT_VYUY:
 -              break;
 -      default:
 -              DRM_DEBUG_KMS("unsupported pixel format %u\n",
 -                              mode_cmd->pixel_format);
 -              return -EINVAL;
 -      }
 +      ret = BEGIN_LP_RING(4);
 +      if (ret)
 +              goto err_unpin;
  
 -      ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
 -      if (ret) {
 -              DRM_ERROR("framebuffer init failed %d\n", ret);
 -              return ret;
 -      }
 +      /* i965+ uses the linear or tiled offsets from the
 +       * Display Registers (which do not change across a page-flip)
 +       * so we need only reprogram the base address.
 +       */
 +      OUT_RING(MI_DISPLAY_FLIP |
 +               MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 +      OUT_RING(fb->pitches[0]);
 +      OUT_RING(obj->gtt_offset | obj->tiling_mode);
  
 -      drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
 -      intel_fb->obj = obj;
 +      /* XXX Enabling the panel-fitter across page-flip is so far
 +       * untested on non-native modes, so ignore it for now.
 +       * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
 +       */
 +      pf = 0;
 +      pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 +      OUT_RING(pf | pipesrc);
 +      ADVANCE_LP_RING();
        return 0;
 -}
 -
 -static struct drm_framebuffer *
 -intel_user_framebuffer_create(struct drm_device *dev,
 -                            struct drm_file *filp,
 -                            struct drm_mode_fb_cmd2 *mode_cmd)
 -{
 -      struct drm_i915_gem_object *obj;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
 -                                              mode_cmd->handles[0]));
 -      if (&obj->base == NULL)
 -              return ERR_PTR(-ENOENT);
 -
 -      return intel_framebuffer_create(dev, mode_cmd, obj);
 +err_unpin:
 +      intel_unpin_fb_obj(obj);
 +err:
 +      return ret;
  }
  
 -static const struct drm_mode_config_funcs intel_mode_funcs = {
 -      .fb_create = intel_user_framebuffer_create,
 -      .output_poll_changed = intel_fb_output_poll_changed,
 -};
 -
 -static struct drm_i915_gem_object *
 -intel_alloc_context_page(struct drm_device *dev)
 +static int intel_gen6_queue_flip(struct drm_device *dev,
 +                               struct drm_crtc *crtc,
 +                               struct drm_framebuffer *fb,
 +                               struct drm_i915_gem_object *obj)
  {
 -      struct drm_i915_gem_object *ctx;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      uint32_t pf, pipesrc;
        int ret;
  
 -      WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 -
 -      ctx = i915_gem_alloc_object(dev, 4096);
 -      if (!ctx) {
 -              DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
 -              return NULL;
 -      }
 -
 -      ret = i915_gem_object_pin(ctx, 4096, true);
 -      if (ret) {
 -              DRM_ERROR("failed to pin power context: %d\n", ret);
 -              goto err_unref;
 -      }
 +      ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 +      if (ret)
 +              goto err;
  
 -      ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
 -      if (ret) {
 -              DRM_ERROR("failed to set-domain on power context: %d\n", ret);
 +      ret = BEGIN_LP_RING(4);
 +      if (ret)
                goto err_unpin;
 -      }
  
 -      return ctx;
 +      OUT_RING(MI_DISPLAY_FLIP |
 +               MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 +      OUT_RING(fb->pitches[0] | obj->tiling_mode);
 +      OUT_RING(obj->gtt_offset);
 +
-       pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
++      /* Contrary to the suggestions in the documentation,
++       * "Enable Panel Fitter" does not seem to be required when page
++       * flipping with a non-native mode, and worse causes a normal
++       * modeset to fail.
++       * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
++       */
++      pf = 0;
 +      pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 +      OUT_RING(pf | pipesrc);
 +      ADVANCE_LP_RING();
 +      return 0;
  
  err_unpin:
 -      i915_gem_object_unpin(ctx);
 -err_unref:
 -      drm_gem_object_unreference(&ctx->base);
 -      mutex_unlock(&dev->struct_mutex);
 -      return NULL;
 +      intel_unpin_fb_obj(obj);
 +err:
 +      return ret;
  }
  
 -bool ironlake_set_drps(struct drm_device *dev, u8 val)
 +/*
 + * On gen7 we currently use the blit ring because (in early silicon at least)
 + * the render ring doesn't give us interrpts for page flip completion, which
 + * means clients will hang after the first flip is queued.  Fortunately the
 + * blit ring generates interrupts properly, so use it instead.
 + */
 +static int intel_gen7_queue_flip(struct drm_device *dev,
 +                               struct drm_crtc *crtc,
 +                               struct drm_framebuffer *fb,
 +                               struct drm_i915_gem_object *obj)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      u16 rgvswctl;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
 +      int ret;
  
 -      rgvswctl = I915_READ16(MEMSWCTL);
 -      if (rgvswctl & MEMCTL_CMD_STS) {
 -              DRM_DEBUG("gpu busy, RCS change rejected\n");
 -              return false; /* still busy with another command */
 -      }
 +      ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
 +      if (ret)
 +              goto err;
 +
 +      ret = intel_ring_begin(ring, 4);
 +      if (ret)
 +              goto err_unpin;
  
 -      rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
 -              (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
 -      I915_WRITE16(MEMSWCTL, rgvswctl);
 -      POSTING_READ16(MEMSWCTL);
 +      intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
 +      intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
 +      intel_ring_emit(ring, (obj->gtt_offset));
 +      intel_ring_emit(ring, (MI_NOOP));
 +      intel_ring_advance(ring);
 +      return 0;
  
 -      rgvswctl |= MEMCTL_CMD_STS;
 -      I915_WRITE16(MEMSWCTL, rgvswctl);
 +err_unpin:
 +      intel_unpin_fb_obj(obj);
 +err:
 +      return ret;
 +}
  
 -      return true;
 +static int intel_default_queue_flip(struct drm_device *dev,
 +                                  struct drm_crtc *crtc,
 +                                  struct drm_framebuffer *fb,
 +                                  struct drm_i915_gem_object *obj)
 +{
 +      return -ENODEV;
  }
  
 -void ironlake_enable_drps(struct drm_device *dev)
 +static int intel_crtc_page_flip(struct drm_crtc *crtc,
 +                              struct drm_framebuffer *fb,
 +                              struct drm_pending_vblank_event *event)
  {
 +      struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 rgvmodectl = I915_READ(MEMMODECTL);
 -      u8 fmax, fmin, fstart, vstart;
 +      struct intel_framebuffer *intel_fb;
 +      struct drm_i915_gem_object *obj;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      struct intel_unpin_work *work;
 +      unsigned long flags;
 +      int ret;
 +
 +      work = kzalloc(sizeof *work, GFP_KERNEL);
 +      if (work == NULL)
 +              return -ENOMEM;
 +
 +      work->event = event;
 +      work->dev = crtc->dev;
 +      intel_fb = to_intel_framebuffer(crtc->fb);
 +      work->old_fb_obj = intel_fb->obj;
 +      INIT_WORK(&work->work, intel_unpin_work_fn);
 +
 +      ret = drm_vblank_get(dev, intel_crtc->pipe);
 +      if (ret)
 +              goto free_work;
 +
 +      /* We borrow the event spin lock for protecting unpin_work */
 +      spin_lock_irqsave(&dev->event_lock, flags);
 +      if (intel_crtc->unpin_work) {
 +              spin_unlock_irqrestore(&dev->event_lock, flags);
 +              kfree(work);
 +              drm_vblank_put(dev, intel_crtc->pipe);
  
 -      /* Enable temp reporting */
 -      I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
 -      I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
 +              DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 +              return -EBUSY;
 +      }
 +      intel_crtc->unpin_work = work;
 +      spin_unlock_irqrestore(&dev->event_lock, flags);
  
 -      /* 100ms RC evaluation intervals */
 -      I915_WRITE(RCUPEI, 100000);
 -      I915_WRITE(RCDNEI, 100000);
 +      intel_fb = to_intel_framebuffer(fb);
 +      obj = intel_fb->obj;
  
 -      /* Set max/min thresholds to 90ms and 80ms respectively */
 -      I915_WRITE(RCBMAXAVG, 90000);
 -      I915_WRITE(RCBMINAVG, 80000);
 +      mutex_lock(&dev->struct_mutex);
  
 -      I915_WRITE(MEMIHYST, 1);
 +      /* Reference the objects for the scheduled work. */
 +      drm_gem_object_reference(&work->old_fb_obj->base);
 +      drm_gem_object_reference(&obj->base);
  
 -      /* Set up min, max, and cur for interrupt handling */
 -      fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
 -      fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
 -      fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 -              MEMMODE_FSTART_SHIFT;
 +      crtc->fb = fb;
  
 -      vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 -              PXVFREQ_PX_SHIFT;
 +      work->pending_flip_obj = obj;
  
 -      dev_priv->fmax = fmax; /* IPS callback will increase this */
 -      dev_priv->fstart = fstart;
 +      work->enable_stall_check = true;
  
 -      dev_priv->max_delay = fstart;
 -      dev_priv->min_delay = fmin;
 -      dev_priv->cur_delay = fstart;
 +      /* Block clients from rendering to the new back buffer until
 +       * the flip occurs and the object is no longer visible.
 +       */
 +      atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
  
 -      DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
 -                       fmax, fmin, fstart);
 +      ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
 +      if (ret)
 +              goto cleanup_pending;
  
 -      I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 +      intel_disable_fbc(dev);
 +      mutex_unlock(&dev->struct_mutex);
  
 -      /*
 -       * Interrupts will be enabled in ironlake_irq_postinstall
 -       */
 +      trace_i915_flip_request(intel_crtc->plane, obj);
  
 -      I915_WRITE(VIDSTART, vstart);
 -      POSTING_READ(VIDSTART);
 +      return 0;
  
 -      rgvmodectl |= MEMMODE_SWMODE_EN;
 -      I915_WRITE(MEMMODECTL, rgvmodectl);
 +cleanup_pending:
 +      atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 +      drm_gem_object_unreference(&work->old_fb_obj->base);
 +      drm_gem_object_unreference(&obj->base);
 +      mutex_unlock(&dev->struct_mutex);
  
 -      if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
 -              DRM_ERROR("stuck trying to change perf mode\n");
 -      msleep(1);
 +      spin_lock_irqsave(&dev->event_lock, flags);
 +      intel_crtc->unpin_work = NULL;
 +      spin_unlock_irqrestore(&dev->event_lock, flags);
  
 -      ironlake_set_drps(dev, fstart);
 +      drm_vblank_put(dev, intel_crtc->pipe);
 +free_work:
 +      kfree(work);
  
 -      dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
 -              I915_READ(0x112e0);
 -      dev_priv->last_time1 = jiffies_to_msecs(jiffies);
 -      dev_priv->last_count2 = I915_READ(0x112f4);
 -      getrawmonotonic(&dev_priv->last_time2);
 +      return ret;
  }
  
 -void ironlake_disable_drps(struct drm_device *dev)
 +static void intel_sanitize_modesetting(struct drm_device *dev,
 +                                     int pipe, int plane)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      u16 rgvswctl = I915_READ16(MEMSWCTL);
 -
 -      /* Ack interrupts, disable EFC interrupt */
 -      I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
 -      I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
 -      I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
 -      I915_WRITE(DEIIR, DE_PCU_EVENT);
 -      I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
 -
 -      /* Go back to the starting frequency */
 -      ironlake_set_drps(dev, dev_priv->fstart);
 -      msleep(1);
 -      rgvswctl |= MEMCTL_CMD_STS;
 -      I915_WRITE(MEMSWCTL, rgvswctl);
 -      msleep(1);
 +      u32 reg, val;
  
 -}
 +      /* Clear any frame start delays used for debugging left by the BIOS */
 +      for_each_pipe(pipe) {
 +              reg = PIPECONF(pipe);
 +              I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 +      }
  
 -void gen6_set_rps(struct drm_device *dev, u8 val)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 swreq;
 +      if (HAS_PCH_SPLIT(dev))
 +              return;
  
 -      swreq = (val & 0x3ff) << 25;
 -      I915_WRITE(GEN6_RPNSWREQ, swreq);
 -}
 +      /* Who knows what state these registers were left in by the BIOS or
 +       * grub?
 +       *
 +       * If we leave the registers in a conflicting state (e.g. with the
 +       * display plane reading from the other pipe than the one we intend
 +       * to use) then when we attempt to teardown the active mode, we will
 +       * not disable the pipes and planes in the correct order -- leaving
 +       * a plane reading from a disabled pipe and possibly leading to
 +       * undefined behaviour.
 +       */
  
 -void gen6_disable_rps(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      reg = DSPCNTR(plane);
 +      val = I915_READ(reg);
  
 -      I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 -      I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
 -      I915_WRITE(GEN6_PMIER, 0);
 -      /* Complete PM interrupt masking here doesn't race with the rps work
 -       * item again unmasking PM interrupts because that is using a different
 -       * register (PMIMR) to mask PM interrupts. The only risk is in leaving
 -       * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 +      if ((val & DISPLAY_PLANE_ENABLE) == 0)
 +              return;
 +      if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
 +              return;
  
 -      spin_lock_irq(&dev_priv->rps_lock);
 -      dev_priv->pm_iir = 0;
 -      spin_unlock_irq(&dev_priv->rps_lock);
 +      /* This display plane is active and attached to the other CPU pipe. */
 +      pipe = !pipe;
  
 -      I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
 +      /* Disable the plane and wait for it to stop reading from the pipe. */
 +      intel_disable_plane(dev_priv, plane, pipe);
 +      intel_disable_pipe(dev_priv, pipe);
  }
  
 -static unsigned long intel_pxfreq(u32 vidfreq)
 +static void intel_crtc_reset(struct drm_crtc *crtc)
  {
 -      unsigned long freq;
 -      int div = (vidfreq & 0x3f0000) >> 16;
 -      int post = (vidfreq & 0x3000) >> 12;
 -      int pre = (vidfreq & 0x7);
 -
 -      if (!pre)
 -              return 0;
 +      struct drm_device *dev = crtc->dev;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  
 -      freq = ((div * 133333) / ((1<<post) * pre));
 +      /* Reset flags back to the 'unknown' status so that they
 +       * will be correctly set on the initial modeset.
 +       */
 +      intel_crtc->dpms_mode = -1;
  
 -      return freq;
 +      /* We need to fix up any BIOS configuration that conflicts with
 +       * our expectations.
 +       */
 +      intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
  }
  
 -void intel_init_emon(struct drm_device *dev)
 +static struct drm_crtc_helper_funcs intel_helper_funcs = {
 +      .dpms = intel_crtc_dpms,
 +      .mode_fixup = intel_crtc_mode_fixup,
 +      .mode_set = intel_crtc_mode_set,
 +      .mode_set_base = intel_pipe_set_base,
 +      .mode_set_base_atomic = intel_pipe_set_base_atomic,
 +      .load_lut = intel_crtc_load_lut,
 +      .disable = intel_crtc_disable,
 +};
 +
 +static const struct drm_crtc_funcs intel_crtc_funcs = {
 +      .reset = intel_crtc_reset,
 +      .cursor_set = intel_crtc_cursor_set,
 +      .cursor_move = intel_crtc_cursor_move,
 +      .gamma_set = intel_crtc_gamma_set,
 +      .set_config = drm_crtc_helper_set_config,
 +      .destroy = intel_crtc_destroy,
 +      .page_flip = intel_crtc_page_flip,
 +};
 +
 +static void intel_crtc_init(struct drm_device *dev, int pipe)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 lcfuse;
 -      u8 pxw[16];
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct intel_crtc *intel_crtc;
        int i;
  
 -      /* Disable to program */
 -      I915_WRITE(ECR, 0);
 -      POSTING_READ(ECR);
 -
 -      /* Program energy weights for various events */
 -      I915_WRITE(SDEW, 0x15040d00);
 -      I915_WRITE(CSIEW0, 0x007f0000);
 -      I915_WRITE(CSIEW1, 0x1e220004);
 -      I915_WRITE(CSIEW2, 0x04000004);
 -
 -      for (i = 0; i < 5; i++)
 -              I915_WRITE(PEW + (i * 4), 0);
 -      for (i = 0; i < 3; i++)
 -              I915_WRITE(DEW + (i * 4), 0);
 -
 -      /* Program P-state weights to account for frequency power adjustment */
 -      for (i = 0; i < 16; i++) {
 -              u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
 -              unsigned long freq = intel_pxfreq(pxvidfreq);
 -              unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
 -                      PXVFREQ_PX_SHIFT;
 -              unsigned long val;
 -
 -              val = vid * vid;
 -              val *= (freq / 1000);
 -              val *= 255;
 -              val /= (127*127*900);
 -              if (val > 0xff)
 -                      DRM_ERROR("bad pxval: %ld\n", val);
 -              pxw[i] = val;
 -      }
 -      /* Render standby states get 0 weight */
 -      pxw[14] = 0;
 -      pxw[15] = 0;
 +      intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
 +      if (intel_crtc == NULL)
 +              return;
  
 -      for (i = 0; i < 4; i++) {
 -              u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
 -                      (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
 -              I915_WRITE(PXW + (i * 4), val);
 -      }
 +      drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
  
 -      /* Adjust magic regs to magic values (more experimental results) */
 -      I915_WRITE(OGW0, 0);
 -      I915_WRITE(OGW1, 0);
 -      I915_WRITE(EG0, 0x00007f00);
 -      I915_WRITE(EG1, 0x0000000e);
 -      I915_WRITE(EG2, 0x000e0000);
 -      I915_WRITE(EG3, 0x68000300);
 -      I915_WRITE(EG4, 0x42000000);
 -      I915_WRITE(EG5, 0x00140031);
 -      I915_WRITE(EG6, 0);
 -      I915_WRITE(EG7, 0);
 +      drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
 +      for (i = 0; i < 256; i++) {
 +              intel_crtc->lut_r[i] = i;
 +              intel_crtc->lut_g[i] = i;
 +              intel_crtc->lut_b[i] = i;
 +      }
  
 -      for (i = 0; i < 8; i++)
 -              I915_WRITE(PXWL + (i * 4), 0);
 +      /* Swap pipes & planes for FBC on pre-965 */
 +      intel_crtc->pipe = pipe;
 +      intel_crtc->plane = pipe;
 +      if (IS_MOBILE(dev) && IS_GEN3(dev)) {
 +              DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
 +              intel_crtc->plane = !pipe;
 +      }
  
 -      /* Enable PMON + select events */
 -      I915_WRITE(ECR, 0x80000019);
 +      BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
 +             dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
 +      dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 +      dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
  
 -      lcfuse = I915_READ(LCFUSE02);
 +      intel_crtc_reset(&intel_crtc->base);
 +      intel_crtc->active = true; /* force the pipe off on setup_init_config */
 +      intel_crtc->bpp = 24; /* default for pre-Ironlake */
  
 -      dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 -}
 +      if (HAS_PCH_SPLIT(dev)) {
 +              if (pipe == 2 && IS_IVYBRIDGE(dev))
 +                      intel_crtc->no_pll = true;
 +              intel_helper_funcs.prepare = ironlake_crtc_prepare;
 +              intel_helper_funcs.commit = ironlake_crtc_commit;
 +      } else {
 +              intel_helper_funcs.prepare = i9xx_crtc_prepare;
 +              intel_helper_funcs.commit = i9xx_crtc_commit;
 +      }
  
 -static int intel_enable_rc6(struct drm_device *dev)
 -{
 -      /*
 -       * Respect the kernel parameter if it is set
 -       */
 -      if (i915_enable_rc6 >= 0)
 -              return i915_enable_rc6;
 +      drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
  
 -      /*
 -       * Disable RC6 on Ironlake
 -       */
 -      if (INTEL_INFO(dev)->gen == 5)
 -              return 0;
 +      intel_crtc->busy = false;
  
 -      /*
 -       * Disable rc6 on Sandybridge
 -       */
 -      if (INTEL_INFO(dev)->gen == 6) {
 -              DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
 -              return INTEL_RC6_ENABLE;
 -      }
 -      DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
 -      return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 +      setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
 +                  (unsigned long)intel_crtc);
  }
  
 -void gen6_enable_rps(struct drm_i915_private *dev_priv)
 +int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 +                              struct drm_file *file)
  {
 -      u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 -      u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 -      u32 pcu_mbox, rc6_mask = 0;
 -      u32 gtfifodbg;
 -      int cur_freq, min_freq, max_freq;
 -      int rc6_mode;
 -      int i;
 -
 -      /* Here begins a magic sequence of register writes to enable
 -       * auto-downclocking.
 -       *
 -       * Perhaps there might be some value in exposing these to
 -       * userspace...
 -       */
 -      I915_WRITE(GEN6_RC_STATE, 0);
 -      mutex_lock(&dev_priv->dev->struct_mutex);
 -
 -      /* Clear the DBG now so we don't confuse earlier errors */
 -      if ((gtfifodbg = I915_READ(GTFIFODBG))) {
 -              DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
 -              I915_WRITE(GTFIFODBG, gtfifodbg);
 -      }
 -
 -      gen6_gt_force_wake_get(dev_priv);
 -
 -      /* disable the counters and set deterministic thresholds */
 -      I915_WRITE(GEN6_RC_CONTROL, 0);
 -
 -      I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
 -      I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
 -      I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
 -      I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 -      I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 -
 -      for (i = 0; i < I915_NUM_RINGS; i++)
 -              I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
 -
 -      I915_WRITE(GEN6_RC_SLEEP, 0);
 -      I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
 -      I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
 -      I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
 -      I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 -
 -      rc6_mode = intel_enable_rc6(dev_priv->dev);
 -      if (rc6_mode & INTEL_RC6_ENABLE)
 -              rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
 -
 -      if (rc6_mode & INTEL_RC6p_ENABLE)
 -              rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
 -
 -      if (rc6_mode & INTEL_RC6pp_ENABLE)
 -              rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
 -
 -      DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
 -                      (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
 -                      (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
 -                      (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
 -
 -      I915_WRITE(GEN6_RC_CONTROL,
 -                 rc6_mask |
 -                 GEN6_RC_CTL_EI_MODE(1) |
 -                 GEN6_RC_CTL_HW_ENABLE);
 -
 -      I915_WRITE(GEN6_RPNSWREQ,
 -                 GEN6_FREQUENCY(10) |
 -                 GEN6_OFFSET(0) |
 -                 GEN6_AGGRESSIVE_TURBO);
 -      I915_WRITE(GEN6_RC_VIDEO_FREQ,
 -                 GEN6_FREQUENCY(12));
 -
 -      I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
 -      I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
 -                 18 << 24 |
 -                 6 << 16);
 -      I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
 -      I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
 -      I915_WRITE(GEN6_RP_UP_EI, 100000);
 -      I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
 -      I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 -      I915_WRITE(GEN6_RP_CONTROL,
 -                 GEN6_RP_MEDIA_TURBO |
 -                 GEN6_RP_MEDIA_HW_MODE |
 -                 GEN6_RP_MEDIA_IS_GFX |
 -                 GEN6_RP_ENABLE |
 -                 GEN6_RP_UP_BUSY_AVG |
 -                 GEN6_RP_DOWN_IDLE_CONT);
 -
 -      if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 -                   500))
 -              DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
 -
 -      I915_WRITE(GEN6_PCODE_DATA, 0);
 -      I915_WRITE(GEN6_PCODE_MAILBOX,
 -                 GEN6_PCODE_READY |
 -                 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
 -      if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 -                   500))
 -              DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 -
 -      min_freq = (rp_state_cap & 0xff0000) >> 16;
 -      max_freq = rp_state_cap & 0xff;
 -      cur_freq = (gt_perf_status & 0xff00) >> 8;
 -
 -      /* Check for overclock support */
 -      if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 -                   500))
 -              DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
 -      I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
 -      pcu_mbox = I915_READ(GEN6_PCODE_DATA);
 -      if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
 -                   500))
 -              DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 -      if (pcu_mbox & (1<<31)) { /* OC supported */
 -              max_freq = pcu_mbox & 0xff;
 -              DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 -      }
 -
 -      /* In units of 100MHz */
 -      dev_priv->max_delay = max_freq;
 -      dev_priv->min_delay = min_freq;
 -      dev_priv->cur_delay = cur_freq;
 -
 -      /* requires MSI enabled */
 -      I915_WRITE(GEN6_PMIER,
 -                 GEN6_PM_MBOX_EVENT |
 -                 GEN6_PM_THERMAL_EVENT |
 -                 GEN6_PM_RP_DOWN_TIMEOUT |
 -                 GEN6_PM_RP_UP_THRESHOLD |
 -                 GEN6_PM_RP_DOWN_THRESHOLD |
 -                 GEN6_PM_RP_UP_EI_EXPIRED |
 -                 GEN6_PM_RP_DOWN_EI_EXPIRED);
 -      spin_lock_irq(&dev_priv->rps_lock);
 -      WARN_ON(dev_priv->pm_iir != 0);
 -      I915_WRITE(GEN6_PMIMR, 0);
 -      spin_unlock_irq(&dev_priv->rps_lock);
 -      /* enable all PM interrupts */
 -      I915_WRITE(GEN6_PMINTRMSK, 0);
 -
 -      gen6_gt_force_wake_put(dev_priv);
 -      mutex_unlock(&dev_priv->dev->struct_mutex);
 -}
 -
 -void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 -{
 -      int min_freq = 15;
 -      int gpu_freq, ia_freq, max_ia_freq;
 -      int scaling_factor = 180;
 -
 -      max_ia_freq = cpufreq_quick_get_max(0);
 -      /*
 -       * Default to measured freq if none found, PCU will ensure we don't go
 -       * over
 -       */
 -      if (!max_ia_freq)
 -              max_ia_freq = tsc_khz;
 -
 -      /* Convert from kHz to MHz */
 -      max_ia_freq /= 1000;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
 +      struct drm_mode_object *drmmode_obj;
 +      struct intel_crtc *crtc;
  
 -      mutex_lock(&dev_priv->dev->struct_mutex);
 +      if (!dev_priv) {
 +              DRM_ERROR("called with no initialization\n");
 +              return -EINVAL;
 +      }
  
 -      /*
 -       * For each potential GPU frequency, load a ring frequency we'd like
 -       * to use for memory access.  We do this by specifying the IA frequency
 -       * the PCU should use as a reference to determine the ring frequency.
 -       */
 -      for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
 -           gpu_freq--) {
 -              int diff = dev_priv->max_delay - gpu_freq;
 +      drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
 +                      DRM_MODE_OBJECT_CRTC);
  
 -              /*
 -               * For GPU frequencies less than 750MHz, just use the lowest
 -               * ring freq.
 -               */
 -              if (gpu_freq < min_freq)
 -                      ia_freq = 800;
 -              else
 -                      ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
 -              ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
 -
 -              I915_WRITE(GEN6_PCODE_DATA,
 -                         (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
 -                         gpu_freq);
 -              I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
 -                         GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
 -              if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
 -                            GEN6_PCODE_READY) == 0, 10)) {
 -                      DRM_ERROR("pcode write of freq table timed out\n");
 -                      continue;
 -              }
 +      if (!drmmode_obj) {
 +              DRM_ERROR("no such CRTC id\n");
 +              return -EINVAL;
        }
  
 -      mutex_unlock(&dev_priv->dev->struct_mutex);
 +      crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
 +      pipe_from_crtc_id->pipe = crtc->pipe;
 +
 +      return 0;
  }
  
 -static void ironlake_init_clock_gating(struct drm_device *dev)
 +static int intel_encoder_clones(struct drm_device *dev, int type_mask)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 -
 -      /* Required for FBC */
 -      dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
 -              DPFCRUNIT_CLOCK_GATE_DISABLE |
 -              DPFDUNIT_CLOCK_GATE_DISABLE;
 -      /* Required for CxSR */
 -      dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
 -
 -      I915_WRITE(PCH_3DCGDIS0,
 -                 MARIUNIT_CLOCK_GATE_DISABLE |
 -                 SVSMUNIT_CLOCK_GATE_DISABLE);
 -      I915_WRITE(PCH_3DCGDIS1,
 -                 VFMUNIT_CLOCK_GATE_DISABLE);
 -
 -      I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 -
 -      /*
 -       * According to the spec the following bits should be set in
 -       * order to enable memory self-refresh
 -       * The bit 22/21 of 0x42004
 -       * The bit 5 of 0x42020
 -       * The bit 15 of 0x45000
 -       */
 -      I915_WRITE(ILK_DISPLAY_CHICKEN2,
 -                 (I915_READ(ILK_DISPLAY_CHICKEN2) |
 -                  ILK_DPARB_GATE | ILK_VSDPFD_FULL));
 -      I915_WRITE(ILK_DSPCLK_GATE,
 -                 (I915_READ(ILK_DSPCLK_GATE) |
 -                  ILK_DPARB_CLK_GATE));
 -      I915_WRITE(DISP_ARB_CTL,
 -                 (I915_READ(DISP_ARB_CTL) |
 -                  DISP_FBC_WM_DIS));
 -      I915_WRITE(WM3_LP_ILK, 0);
 -      I915_WRITE(WM2_LP_ILK, 0);
 -      I915_WRITE(WM1_LP_ILK, 0);
 +      struct intel_encoder *encoder;
 +      int index_mask = 0;
 +      int entry = 0;
  
 -      /*
 -       * Based on the document from hardware guys the following bits
 -       * should be set unconditionally in order to enable FBC.
 -       * The bit 22 of 0x42000
 -       * The bit 22 of 0x42004
 -       * The bit 7,8,9 of 0x42020.
 -       */
 -      if (IS_IRONLAKE_M(dev)) {
 -              I915_WRITE(ILK_DISPLAY_CHICKEN1,
 -                         I915_READ(ILK_DISPLAY_CHICKEN1) |
 -                         ILK_FBCQ_DIS);
 -              I915_WRITE(ILK_DISPLAY_CHICKEN2,
 -                         I915_READ(ILK_DISPLAY_CHICKEN2) |
 -                         ILK_DPARB_GATE);
 -              I915_WRITE(ILK_DSPCLK_GATE,
 -                         I915_READ(ILK_DSPCLK_GATE) |
 -                         ILK_DPFC_DIS1 |
 -                         ILK_DPFC_DIS2 |
 -                         ILK_CLK_FBC);
 +      list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
 +              if (type_mask & encoder->clone_mask)
 +                      index_mask |= (1 << entry);
 +              entry++;
        }
  
 -      I915_WRITE(ILK_DISPLAY_CHICKEN2,
 -                 I915_READ(ILK_DISPLAY_CHICKEN2) |
 -                 ILK_ELPIN_409_SELECT);
 -      I915_WRITE(_3D_CHICKEN2,
 -                 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
 -                 _3D_CHICKEN2_WM_READ_PIPELINED);
 +      return index_mask;
  }
  
 -static void gen6_init_clock_gating(struct drm_device *dev)
 +static bool has_edp_a(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      int pipe;
 -      uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 -
 -      I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 -
 -      I915_WRITE(ILK_DISPLAY_CHICKEN2,
 -                 I915_READ(ILK_DISPLAY_CHICKEN2) |
 -                 ILK_ELPIN_409_SELECT);
  
 -      I915_WRITE(WM3_LP_ILK, 0);
 -      I915_WRITE(WM2_LP_ILK, 0);
 -      I915_WRITE(WM1_LP_ILK, 0);
 -
 -      I915_WRITE(GEN6_UCGCTL1,
 -                 I915_READ(GEN6_UCGCTL1) |
 -                 GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
 +      if (!IS_MOBILE(dev))
 +              return false;
  
 -      /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
 -       * gating disable must be set.  Failure to set it results in
 -       * flickering pixels due to Z write ordering failures after
 -       * some amount of runtime in the Mesa "fire" demo, and Unigine
 -       * Sanctuary and Tropics, and apparently anything else with
 -       * alpha test or pixel discard.
 -       *
 -       * According to the spec, bit 11 (RCCUNIT) must also be set,
 -       * but we didn't debug actual testcases to find it out.
 -       */
 -      I915_WRITE(GEN6_UCGCTL2,
 -                 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
 -                 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 +      if ((I915_READ(DP_A) & DP_DETECTED) == 0)
 +              return false;
  
 -      /*
 -       * According to the spec the following bits should be
 -       * set in order to enable memory self-refresh and fbc:
 -       * The bit21 and bit22 of 0x42000
 -       * The bit21 and bit22 of 0x42004
 -       * The bit5 and bit7 of 0x42020
 -       * The bit14 of 0x70180
 -       * The bit14 of 0x71180
 -       */
 -      I915_WRITE(ILK_DISPLAY_CHICKEN1,
 -                 I915_READ(ILK_DISPLAY_CHICKEN1) |
 -                 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
 -      I915_WRITE(ILK_DISPLAY_CHICKEN2,
 -                 I915_READ(ILK_DISPLAY_CHICKEN2) |
 -                 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
 -      I915_WRITE(ILK_DSPCLK_GATE,
 -                 I915_READ(ILK_DSPCLK_GATE) |
 -                 ILK_DPARB_CLK_GATE  |
 -                 ILK_DPFD_CLK_GATE);
 +      if (IS_GEN5(dev) &&
 +          (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
 +              return false;
  
 -      for_each_pipe(pipe) {
 -              I915_WRITE(DSPCNTR(pipe),
 -                         I915_READ(DSPCNTR(pipe)) |
 -                         DISPPLANE_TRICKLE_FEED_DISABLE);
 -              intel_flush_display_plane(dev_priv, pipe);
 -      }
 +      return true;
  }
  
 -static void ivybridge_init_clock_gating(struct drm_device *dev)
 +static void intel_setup_outputs(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      int pipe;
 -      uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 -
 -      I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 -
 -      I915_WRITE(WM3_LP_ILK, 0);
 -      I915_WRITE(WM2_LP_ILK, 0);
 -      I915_WRITE(WM1_LP_ILK, 0);
 -
 -      /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
 -       * This implements the WaDisableRCZUnitClockGating workaround.
 -       */
 -      I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 -
 -      I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 -
 -      I915_WRITE(IVB_CHICKEN3,
 -                 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
 -                 CHICKEN3_DGMG_DONE_FIX_DISABLE);
 -
 -      /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
 -      I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 -                 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 +      struct intel_encoder *encoder;
 +      bool dpd_is_edp = false;
 +      bool has_lvds;
  
 -      /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
 -      I915_WRITE(GEN7_L3CNTLREG1,
 -                      GEN7_WA_FOR_GEN7_L3_CONTROL);
 -      I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
 -                      GEN7_WA_L3_CHICKEN_MODE);
 +      has_lvds = intel_lvds_init(dev);
 +      if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
 +              /* disable the panel fitter on everything but LVDS */
 +              I915_WRITE(PFIT_CONTROL, 0);
 +      }
  
 -      /* This is required by WaCatErrorRejectionIssue */
 -      I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
 -                      I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 -                      GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 +      if (HAS_PCH_SPLIT(dev)) {
 +              dpd_is_edp = intel_dpd_is_edp(dev);
  
 -      for_each_pipe(pipe) {
 -              I915_WRITE(DSPCNTR(pipe),
 -                         I915_READ(DSPCNTR(pipe)) |
 -                         DISPPLANE_TRICKLE_FEED_DISABLE);
 -              intel_flush_display_plane(dev_priv, pipe);
 -      }
 -}
 +              if (has_edp_a(dev))
 +                      intel_dp_init(dev, DP_A);
  
 -static void g4x_init_clock_gating(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      uint32_t dspclk_gate;
 +              if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
 +                      intel_dp_init(dev, PCH_DP_D);
 +      }
  
 -      I915_WRITE(RENCLK_GATE_D1, 0);
 -      I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
 -                 GS_UNIT_CLOCK_GATE_DISABLE |
 -                 CL_UNIT_CLOCK_GATE_DISABLE);
 -      I915_WRITE(RAMCLK_GATE_D, 0);
 -      dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
 -              OVRUNIT_CLOCK_GATE_DISABLE |
 -              OVCUNIT_CLOCK_GATE_DISABLE;
 -      if (IS_GM45(dev))
 -              dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
 -      I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
 -}
 +      intel_crt_init(dev);
  
 -static void crestline_init_clock_gating(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      if (HAS_PCH_SPLIT(dev)) {
 +              int found;
  
 -      I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
 -      I915_WRITE(RENCLK_GATE_D2, 0);
 -      I915_WRITE(DSPCLK_GATE_D, 0);
 -      I915_WRITE(RAMCLK_GATE_D, 0);
 -      I915_WRITE16(DEUC, 0);
 -}
 +              if (I915_READ(HDMIB) & PORT_DETECTED) {
 +                      /* PCH SDVOB multiplex with HDMIB */
 +                      found = intel_sdvo_init(dev, PCH_SDVOB, true);
 +                      if (!found)
 +                              intel_hdmi_init(dev, HDMIB);
 +                      if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
 +                              intel_dp_init(dev, PCH_DP_B);
 +              }
  
 -static void broadwater_init_clock_gating(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +              if (I915_READ(HDMIC) & PORT_DETECTED)
 +                      intel_hdmi_init(dev, HDMIC);
  
 -      I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
 -                 I965_RCC_CLOCK_GATE_DISABLE |
 -                 I965_RCPB_CLOCK_GATE_DISABLE |
 -                 I965_ISC_CLOCK_GATE_DISABLE |
 -                 I965_FBC_CLOCK_GATE_DISABLE);
 -      I915_WRITE(RENCLK_GATE_D2, 0);
 -}
 +              if (I915_READ(HDMID) & PORT_DETECTED)
 +                      intel_hdmi_init(dev, HDMID);
  
 -static void gen3_init_clock_gating(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 dstate = I915_READ(D_STATE);
 +              if (I915_READ(PCH_DP_C) & DP_DETECTED)
 +                      intel_dp_init(dev, PCH_DP_C);
  
 -      dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
 -              DSTATE_DOT_CLOCK_GATING;
 -      I915_WRITE(D_STATE, dstate);
 -}
 +              if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
 +                      intel_dp_init(dev, PCH_DP_D);
  
 -static void i85x_init_clock_gating(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
 +              bool found = false;
  
 -      I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
 -}
 +              if (I915_READ(SDVOB) & SDVO_DETECTED) {
 +                      DRM_DEBUG_KMS("probing SDVOB\n");
 +                      found = intel_sdvo_init(dev, SDVOB, true);
 +                      if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
 +                              DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
 +                              intel_hdmi_init(dev, SDVOB);
 +                      }
  
 -static void i830_init_clock_gating(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +                      if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
 +                              DRM_DEBUG_KMS("probing DP_B\n");
 +                              intel_dp_init(dev, DP_B);
 +                      }
 +              }
  
 -      I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 -}
 +              /* Before G4X SDVOC doesn't have its own detect register */
  
 -static void ibx_init_clock_gating(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +              if (I915_READ(SDVOB) & SDVO_DETECTED) {
 +                      DRM_DEBUG_KMS("probing SDVOC\n");
 +                      found = intel_sdvo_init(dev, SDVOC, false);
 +              }
  
 -      /*
 -       * On Ibex Peak and Cougar Point, we need to disable clock
 -       * gating for the panel power sequencer or it will fail to
 -       * start up when no ports are active.
 -       */
 -      I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
 -}
 +              if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
  
 -static void cpt_init_clock_gating(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      int pipe;
 +                      if (SUPPORTS_INTEGRATED_HDMI(dev)) {
 +                              DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
 +                              intel_hdmi_init(dev, SDVOC);
 +                      }
 +                      if (SUPPORTS_INTEGRATED_DP(dev)) {
 +                              DRM_DEBUG_KMS("probing DP_C\n");
 +                              intel_dp_init(dev, DP_C);
 +                      }
 +              }
  
 -      /*
 -       * On Ibex Peak and Cougar Point, we need to disable clock
 -       * gating for the panel power sequencer or it will fail to
 -       * start up when no ports are active.
 -       */
 -      I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
 -      I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
 -                 DPLS_EDP_PPS_FIX_DIS);
 -      /* Without this, mode sets may fail silently on FDI */
 -      for_each_pipe(pipe)
 -              I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
 -}
 +              if (SUPPORTS_INTEGRATED_DP(dev) &&
 +                  (I915_READ(DP_D) & DP_DETECTED)) {
 +                      DRM_DEBUG_KMS("probing DP_D\n");
 +                      intel_dp_init(dev, DP_D);
 +              }
 +      } else if (IS_GEN2(dev))
 +              intel_dvo_init(dev);
  
 -static void ironlake_teardown_rc6(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      if (SUPPORTS_TV(dev))
 +              intel_tv_init(dev);
  
 -      if (dev_priv->renderctx) {
 -              i915_gem_object_unpin(dev_priv->renderctx);
 -              drm_gem_object_unreference(&dev_priv->renderctx->base);
 -              dev_priv->renderctx = NULL;
 +      list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
 +              encoder->base.possible_crtcs = encoder->crtc_mask;
 +              encoder->base.possible_clones =
 +                      intel_encoder_clones(dev, encoder->clone_mask);
        }
  
 -      if (dev_priv->pwrctx) {
 -              i915_gem_object_unpin(dev_priv->pwrctx);
 -              drm_gem_object_unreference(&dev_priv->pwrctx->base);
 -              dev_priv->pwrctx = NULL;
 -      }
 +      /* disable all the possible outputs/crtcs before entering KMS mode */
 +      drm_helper_disable_unused_functions(dev);
 +
 +      if (HAS_PCH_SPLIT(dev))
 +              ironlake_init_pch_refclk(dev);
  }
  
 -static void ironlake_disable_rc6(struct drm_device *dev)
 +static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      if (I915_READ(PWRCTXA)) {
 -              /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
 -              I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
 -              wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
 -                       50);
 -
 -              I915_WRITE(PWRCTXA, 0);
 -              POSTING_READ(PWRCTXA);
 +      struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  
 -              I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 -              POSTING_READ(RSTDBYCTL);
 -      }
 +      drm_framebuffer_cleanup(fb);
 +      drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
  
 -      ironlake_teardown_rc6(dev);
 +      kfree(intel_fb);
  }
  
 -static int ironlake_setup_rc6(struct drm_device *dev)
 +static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
 +                                              struct drm_file *file,
 +                                              unsigned int *handle)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      if (dev_priv->renderctx == NULL)
 -              dev_priv->renderctx = intel_alloc_context_page(dev);
 -      if (!dev_priv->renderctx)
 -              return -ENOMEM;
 -
 -      if (dev_priv->pwrctx == NULL)
 -              dev_priv->pwrctx = intel_alloc_context_page(dev);
 -      if (!dev_priv->pwrctx) {
 -              ironlake_teardown_rc6(dev);
 -              return -ENOMEM;
 -      }
 +      struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 +      struct drm_i915_gem_object *obj = intel_fb->obj;
  
 -      return 0;
 +      return drm_gem_handle_create(file, &obj->base, handle);
  }
  
 -void ironlake_enable_rc6(struct drm_device *dev)
 +static const struct drm_framebuffer_funcs intel_fb_funcs = {
 +      .destroy = intel_user_framebuffer_destroy,
 +      .create_handle = intel_user_framebuffer_create_handle,
 +};
 +
 +int intel_framebuffer_init(struct drm_device *dev,
 +                         struct intel_framebuffer *intel_fb,
 +                         struct drm_mode_fb_cmd2 *mode_cmd,
 +                         struct drm_i915_gem_object *obj)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
  
 -      /* rc6 disabled by default due to repeated reports of hanging during
 -       * boot and resume.
 -       */
 -      if (!intel_enable_rc6(dev))
 -              return;
 +      if (obj->tiling_mode == I915_TILING_Y)
 +              return -EINVAL;
  
 -      mutex_lock(&dev->struct_mutex);
 -      ret = ironlake_setup_rc6(dev);
 -      if (ret) {
 -              mutex_unlock(&dev->struct_mutex);
 -              return;
 -      }
 +      if (mode_cmd->pitches[0] & 63)
 +              return -EINVAL;
  
 -      /*
 -       * GPU can automatically power down the render unit if given a page
 -       * to save state.
 -       */
 -      ret = BEGIN_LP_RING(6);
 -      if (ret) {
 -              ironlake_teardown_rc6(dev);
 -              mutex_unlock(&dev->struct_mutex);
 -              return;
 +      switch (mode_cmd->pixel_format) {
 +      case DRM_FORMAT_RGB332:
 +      case DRM_FORMAT_RGB565:
 +      case DRM_FORMAT_XRGB8888:
 +      case DRM_FORMAT_XBGR8888:
 +      case DRM_FORMAT_ARGB8888:
 +      case DRM_FORMAT_XRGB2101010:
 +      case DRM_FORMAT_ARGB2101010:
 +              /* RGB formats are common across chipsets */
 +              break;
 +      case DRM_FORMAT_YUYV:
 +      case DRM_FORMAT_UYVY:
 +      case DRM_FORMAT_YVYU:
 +      case DRM_FORMAT_VYUY:
 +              break;
 +      default:
 +              DRM_DEBUG_KMS("unsupported pixel format %u\n",
 +                              mode_cmd->pixel_format);
 +              return -EINVAL;
        }
  
 -      OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
 -      OUT_RING(MI_SET_CONTEXT);
 -      OUT_RING(dev_priv->renderctx->gtt_offset |
 -               MI_MM_SPACE_GTT |
 -               MI_SAVE_EXT_STATE_EN |
 -               MI_RESTORE_EXT_STATE_EN |
 -               MI_RESTORE_INHIBIT);
 -      OUT_RING(MI_SUSPEND_FLUSH);
 -      OUT_RING(MI_NOOP);
 -      OUT_RING(MI_FLUSH);
 -      ADVANCE_LP_RING();
 -
 -      /*
 -       * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
 -       * does an implicit flush, combined with MI_FLUSH above, it should be
 -       * safe to assume that renderctx is valid
 -       */
 -      ret = intel_wait_ring_idle(LP_RING(dev_priv));
 +      ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
        if (ret) {
 -              DRM_ERROR("failed to enable ironlake power power savings\n");
 -              ironlake_teardown_rc6(dev);
 -              mutex_unlock(&dev->struct_mutex);
 -              return;
 +              DRM_ERROR("framebuffer init failed %d\n", ret);
 +              return ret;
        }
  
 -      I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
 -      I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 -      mutex_unlock(&dev->struct_mutex);
 +      drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
 +      intel_fb->obj = obj;
 +      return 0;
  }
  
 -void intel_init_clock_gating(struct drm_device *dev)
 +static struct drm_framebuffer *
 +intel_user_framebuffer_create(struct drm_device *dev,
 +                            struct drm_file *filp,
 +                            struct drm_mode_fb_cmd2 *mode_cmd)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_object *obj;
  
 -      dev_priv->display.init_clock_gating(dev);
 +      obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
 +                                              mode_cmd->handles[0]));
 +      if (&obj->base == NULL)
 +              return ERR_PTR(-ENOENT);
  
 -      if (dev_priv->display.init_pch_clock_gating)
 -              dev_priv->display.init_pch_clock_gating(dev);
 +      return intel_framebuffer_create(dev, mode_cmd, obj);
  }
  
 +static const struct drm_mode_config_funcs intel_mode_funcs = {
 +      .fb_create = intel_user_framebuffer_create,
 +      .output_poll_changed = intel_fb_output_poll_changed,
 +};
 +
  /* Set up chip specific display functions */
  static void intel_init_display(struct drm_device *dev)
  {
                dev_priv->display.update_plane = i9xx_update_plane;
        }
  
 -      if (I915_HAS_FBC(dev)) {
 -              if (HAS_PCH_SPLIT(dev)) {
 -                      dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
 -                      dev_priv->display.enable_fbc = ironlake_enable_fbc;
 -                      dev_priv->display.disable_fbc = ironlake_disable_fbc;
 -              } else if (IS_GM45(dev)) {
 -                      dev_priv->display.fbc_enabled = g4x_fbc_enabled;
 -                      dev_priv->display.enable_fbc = g4x_enable_fbc;
 -                      dev_priv->display.disable_fbc = g4x_disable_fbc;
 -              } else if (IS_CRESTLINE(dev)) {
 -                      dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
 -                      dev_priv->display.enable_fbc = i8xx_enable_fbc;
 -                      dev_priv->display.disable_fbc = i8xx_disable_fbc;
 -              }
 -              /* 855GM needs testing */
 -      }
 -
        /* Returns the core display clock speed */
 -      if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
 +      if (IS_VALLEYVIEW(dev))
 +              dev_priv->display.get_display_clock_speed =
 +                      valleyview_get_display_clock_speed;
 +      else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
                dev_priv->display.get_display_clock_speed =
                        i945_get_display_clock_speed;
        else if (IS_I915G(dev))
                dev_priv->display.get_display_clock_speed =
                        i830_get_display_clock_speed;
  
 -      /* For FIFO watermark updates */
        if (HAS_PCH_SPLIT(dev)) {
 -              dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
 -              dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
 -
 -              /* IVB configs may use multi-threaded forcewake */
 -              if (IS_IVYBRIDGE(dev)) {
 -                      u32     ecobus;
 -
 -                      /* A small trick here - if the bios hasn't configured MT forcewake,
 -                       * and if the device is in RC6, then force_wake_mt_get will not wake
 -                       * the device and the ECOBUS read will return zero. Which will be
 -                       * (correctly) interpreted by the test below as MT forcewake being
 -                       * disabled.
 -                       */
 -                      mutex_lock(&dev->struct_mutex);
 -                      __gen6_gt_force_wake_mt_get(dev_priv);
 -                      ecobus = I915_READ_NOTRACE(ECOBUS);
 -                      __gen6_gt_force_wake_mt_put(dev_priv);
 -                      mutex_unlock(&dev->struct_mutex);
 -
 -                      if (ecobus & FORCEWAKE_MT_ENABLE) {
 -                              DRM_DEBUG_KMS("Using MT version of forcewake\n");
 -                              dev_priv->display.force_wake_get =
 -                                      __gen6_gt_force_wake_mt_get;
 -                              dev_priv->display.force_wake_put =
 -                                      __gen6_gt_force_wake_mt_put;
 -                      }
 -              }
 -
 -              if (HAS_PCH_IBX(dev))
 -                      dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
 -              else if (HAS_PCH_CPT(dev))
 -                      dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
 -
                if (IS_GEN5(dev)) {
 -                      if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
 -                              dev_priv->display.update_wm = ironlake_update_wm;
 -                      else {
 -                              DRM_DEBUG_KMS("Failed to get proper latency. "
 -                                            "Disable CxSR\n");
 -                              dev_priv->display.update_wm = NULL;
 -                      }
                        dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
 -                      dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
                        dev_priv->display.write_eld = ironlake_write_eld;
                } else if (IS_GEN6(dev)) {
 -                      if (SNB_READ_WM0_LATENCY()) {
 -                              dev_priv->display.update_wm = sandybridge_update_wm;
 -                              dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
 -                      } else {
 -                              DRM_DEBUG_KMS("Failed to read display plane latency. "
 -                                            "Disable CxSR\n");
 -                              dev_priv->display.update_wm = NULL;
 -                      }
                        dev_priv->display.fdi_link_train = gen6_fdi_link_train;
 -                      dev_priv->display.init_clock_gating = gen6_init_clock_gating;
                        dev_priv->display.write_eld = ironlake_write_eld;
                } else if (IS_IVYBRIDGE(dev)) {
                        /* FIXME: detect B0+ stepping and use auto training */
                        dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
 -                      if (SNB_READ_WM0_LATENCY()) {
 -                              dev_priv->display.update_wm = sandybridge_update_wm;
 -                              dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
 -                      } else {
 -                              DRM_DEBUG_KMS("Failed to read display plane latency. "
 -                                            "Disable CxSR\n");
 -                              dev_priv->display.update_wm = NULL;
 -                      }
 -                      dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
                        dev_priv->display.write_eld = ironlake_write_eld;
                } else
                        dev_priv->display.update_wm = NULL;
 -      } else if (IS_PINEVIEW(dev)) {
 -              if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
 -                                          dev_priv->is_ddr3,
 -                                          dev_priv->fsb_freq,
 -                                          dev_priv->mem_freq)) {
 -                      DRM_INFO("failed to find known CxSR latency "
 -                               "(found ddr%s fsb freq %d, mem freq %d), "
 -                               "disabling CxSR\n",
 -                               (dev_priv->is_ddr3 == 1) ? "3" : "2",
 -                               dev_priv->fsb_freq, dev_priv->mem_freq);
 -                      /* Disable CxSR and never update its watermark again */
 -                      pineview_disable_cxsr(dev);
 -                      dev_priv->display.update_wm = NULL;
 -              } else
 -                      dev_priv->display.update_wm = pineview_update_wm;
 -              dev_priv->display.init_clock_gating = gen3_init_clock_gating;
 +      } else if (IS_VALLEYVIEW(dev)) {
 +              dev_priv->display.force_wake_get = vlv_force_wake_get;
 +              dev_priv->display.force_wake_put = vlv_force_wake_put;
        } else if (IS_G4X(dev)) {
                dev_priv->display.write_eld = g4x_write_eld;
 -              dev_priv->display.update_wm = g4x_update_wm;
 -              dev_priv->display.init_clock_gating = g4x_init_clock_gating;
 -      } else if (IS_GEN4(dev)) {
 -              dev_priv->display.update_wm = i965_update_wm;
 -              if (IS_CRESTLINE(dev))
 -                      dev_priv->display.init_clock_gating = crestline_init_clock_gating;
 -              else if (IS_BROADWATER(dev))
 -                      dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
 -      } else if (IS_GEN3(dev)) {
 -              dev_priv->display.update_wm = i9xx_update_wm;
 -              dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
 -              dev_priv->display.init_clock_gating = gen3_init_clock_gating;
 -      } else if (IS_I865G(dev)) {
 -              dev_priv->display.update_wm = i830_update_wm;
 -              dev_priv->display.init_clock_gating = i85x_init_clock_gating;
 -              dev_priv->display.get_fifo_size = i830_get_fifo_size;
 -      } else if (IS_I85X(dev)) {
 -              dev_priv->display.update_wm = i9xx_update_wm;
 -              dev_priv->display.get_fifo_size = i85x_get_fifo_size;
 -              dev_priv->display.init_clock_gating = i85x_init_clock_gating;
 -      } else {
 -              dev_priv->display.update_wm = i830_update_wm;
 -              dev_priv->display.init_clock_gating = i830_init_clock_gating;
 -              if (IS_845G(dev))
 -                      dev_priv->display.get_fifo_size = i845_get_fifo_size;
 -              else
 -                      dev_priv->display.get_fifo_size = i830_get_fifo_size;
        }
  
        /* Default just returns -ENODEV to indicate unsupported */
@@@ -6426,7 -9090,7 +6437,7 @@@ static void quirk_pipea_force(struct dr
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        dev_priv->quirks |= QUIRK_PIPEA_FORCE;
 -      DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
 +      DRM_INFO("applying pipe a force quirk\n");
  }
  
  /*
@@@ -6436,18 -9100,6 +6447,18 @@@ static void quirk_ssc_force_disable(str
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
 +      DRM_INFO("applying lvds SSC disable quirk\n");
 +}
 +
 +/*
 + * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 + * brightness value
 + */
 +static void quirk_invert_brightness(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
 +      DRM_INFO("applying inverted panel brightness quirk\n");
  }
  
  struct intel_quirk {
        void (*hook)(struct drm_device *dev);
  };
  
 -struct intel_quirk intel_quirks[] = {
 +static struct intel_quirk intel_quirks[] = {
        /* HP Mini needs pipe A force quirk (LP: #322104) */
        { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
  
  
        /* Sony Vaio Y cannot use SSC on LVDS */
        { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
 +
 +      /* Acer Aspire 5734Z must invert backlight brightness */
 +      { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
  };
  
  static void intel_init_quirks(struct drm_device *dev)
@@@ -6517,7 -9166,7 +6528,7 @@@ static void i915_disable_vga(struct drm
                vga_reg = VGACNTRL;
  
        vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
 -      outb(1, VGA_SR_INDEX);
 +      outb(SR01, VGA_SR_INDEX);
        sr1 = inb(VGA_SR_DATA);
        outb(sr1 | 1<<5, VGA_SR_DATA);
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
        POSTING_READ(vga_reg);
  }
  
 +static void ivb_pch_pwm_override(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      /*
 +       * IVB has CPU eDP backlight regs too, set things up to let the
 +       * PCH regs control the backlight
 +       */
 +      I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
 +      I915_WRITE(BLC_PWM_CPU_CTL, 0);
 +      I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
 +}
 +
 +void intel_modeset_init_hw(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      intel_init_clock_gating(dev);
 +
 +      if (IS_IRONLAKE_M(dev)) {
 +              ironlake_enable_drps(dev);
 +              intel_init_emon(dev);
 +      }
 +
 +      if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
 +              gen6_enable_rps(dev_priv);
 +              gen6_update_ring_freq(dev_priv);
 +      }
 +
 +      if (IS_IVYBRIDGE(dev))
 +              ivb_pch_pwm_override(dev);
 +}
 +
  void intel_modeset_init(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        intel_init_quirks(dev);
  
 +      intel_init_pm(dev);
 +
        intel_init_display(dev);
  
        if (IS_GEN2(dev)) {
        i915_disable_vga(dev);
        intel_setup_outputs(dev);
  
 -      intel_init_clock_gating(dev);
 -
 -      if (IS_IRONLAKE_M(dev)) {
 -              ironlake_enable_drps(dev);
 -              intel_init_emon(dev);
 -      }
 -
 -      if (IS_GEN6(dev) || IS_GEN7(dev)) {
 -              gen6_enable_rps(dev_priv);
 -              gen6_update_ring_freq(dev_priv);
 -      }
 +      intel_modeset_init_hw(dev);
  
        INIT_WORK(&dev_priv->idle_work, intel_idle_update);
        setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
@@@ -6647,15 -9271,12 +6658,15 @@@ void intel_modeset_cleanup(struct drm_d
  
        if (IS_IRONLAKE_M(dev))
                ironlake_disable_drps(dev);
 -      if (IS_GEN6(dev) || IS_GEN7(dev))
 +      if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
                gen6_disable_rps(dev);
  
        if (IS_IRONLAKE_M(dev))
                ironlake_disable_rc6(dev);
  
 +      if (IS_VALLEYVIEW(dev))
 +              vlv_init_dpio(dev);
 +
        mutex_unlock(&dev->struct_mutex);
  
        /* Disable the irq before mode object teardown, for the irq might
index c5bf8bebf0b07ead891be89de5f26a977a9530b3,715afa15302528ac7523f883aee5bd7e3c428906..7a7cae77f0ca6eca8a9f5ef30d85ae1edef6d66c
        ret__;                                                          \
  })
  
 +#define wait_for_atomic_us(COND, US) ({ \
 +      int i, ret__ = -ETIMEDOUT;      \
 +      for (i = 0; i < (US); i++) {    \
 +              if ((COND)) {           \
 +                      ret__ = 0;      \
 +                      break;          \
 +              }                       \
 +              udelay(1);              \
 +      }                               \
 +      ret__;                          \
 +})
 +
  #define wait_for(COND, MS) _wait_for(COND, MS, 1)
  #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
  
  #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
  #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
  #define INTEL_MODE_DP_FORCE_6BPC (0x10)
+ /* This flag must be set by the encoder's mode_fixup if it changes the crtc
+  * timings in the mode to prevent the crtc fixup from overwriting them.
+  * Currently only lvds needs that. */
+ #define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
  
  static inline void
  intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
@@@ -204,25 -196,6 +208,25 @@@ struct intel_plane 
                             struct drm_intel_sprite_colorkey *key);
  };
  
 +struct intel_watermark_params {
 +      unsigned long fifo_size;
 +      unsigned long max_wm;
 +      unsigned long default_wm;
 +      unsigned long guard_size;
 +      unsigned long cacheline_size;
 +};
 +
 +struct cxsr_latency {
 +      int is_desktop;
 +      int is_ddr3;
 +      unsigned long fsb_freq;
 +      unsigned long mem_freq;
 +      unsigned long display_sr;
 +      unsigned long display_hpll_disable;
 +      unsigned long cursor_sr;
 +      unsigned long cursor_hpll_disable;
 +};
 +
  #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
  #define to_intel_connector(x) container_of(x, struct intel_connector, base)
  #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
@@@ -324,8 -297,7 +328,8 @@@ extern void intel_attach_broadcast_rgb_
  extern void intel_crt_init(struct drm_device *dev);
  extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
  void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 -extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
 +extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
 +                          bool is_sdvob);
  extern void intel_dvo_init(struct drm_device *dev);
  extern void intel_tv_init(struct drm_device *dev);
  extern void intel_mark_busy(struct drm_device *dev,
@@@ -339,8 -311,6 +343,8 @@@ extern bool intel_dpd_is_edp(struct drm
  extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
  extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
  extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
 +extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 +                                    enum plane plane);
  
  /* intel_panel.c */
  extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@@ -398,7 -368,6 +402,7 @@@ extern void intel_crtc_fb_gamma_set(str
  extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
                                    u16 *blue, int regno);
  extern void intel_enable_clock_gating(struct drm_device *dev);
 +extern void ironlake_disable_rc6(struct drm_device *dev);
  extern void ironlake_enable_drps(struct drm_device *dev);
  extern void ironlake_disable_drps(struct drm_device *dev);
  extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
@@@ -444,7 -413,7 +448,7 @@@ extern void intel_write_eld(struct drm_
  extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
  
  /* For use by IVB LP watermark workaround in intel_sprite.c */
 -extern void sandybridge_update_wm(struct drm_device *dev);
 +extern void intel_update_watermarks(struct drm_device *dev);
  extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
                                           uint32_t sprite_width,
                                           int pixel_size);
@@@ -454,13 -423,4 +458,13 @@@ extern int intel_sprite_set_colorkey(st
  extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
  
 +extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
 +
 +/* Power-related functions, located in intel_pm.c */
 +extern void intel_init_pm(struct drm_device *dev);
 +/* FBC */
 +extern bool intel_fbc_enabled(struct drm_device *dev);
 +extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 +extern void intel_update_fbc(struct drm_device *dev);
 +
  #endif /* __INTEL_DRV_H__ */
index 71ef2896be96efe6d79e0f930b2e3d83a0e59632,6e9ee33fd4122110a4df115c7d74bb18c20fc386..bf8690720a0cb882b5d99f6edee38b46d7ff48ec
@@@ -94,7 -94,7 +94,7 @@@ static int intelfb_create(struct intel_
        mutex_lock(&dev->struct_mutex);
  
        /* Flush everything out, we'll be doing GTT only from now on */
 -      ret = intel_pin_and_fence_fb_obj(dev, obj, false);
 +      ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
        if (ret) {
                DRM_ERROR("failed to pin fb: %d\n", ret);
                goto out_unref;
@@@ -279,6 -279,8 +279,8 @@@ void intel_fb_restore_mode(struct drm_d
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_plane *plane;
  
+       mutex_lock(&dev->mode_config.mutex);
        ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
        if (ret)
                DRM_DEBUG("failed to restore crtc mode\n");
        /* Be sure to shut off any planes that may be active */
        list_for_each_entry(plane, &config->plane_list, head)
                plane->funcs->disable_plane(plane);
+       mutex_unlock(&dev->mode_config.mutex);
  }
index 7de2d3b85b328cd9ff2d04d7419cea37edd550ce,2d7f47b56b6ae7a7b1922a328ab971fe9d63077a..1eef50d470d2a91924a448660cdf808cb63ed41d
@@@ -136,7 -136,7 +136,7 @@@ static void i9xx_write_infoframe(struc
  
        val &= ~VIDEO_DIP_SELECT_MASK;
  
-       I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
+       I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
  
        for (i = 0; i < len; i += 4) {
                I915_WRITE(VIDEO_DIP_DATA, *data);
@@@ -177,37 -177,6 +177,37 @@@ static void ironlake_write_infoframe(st
  
        I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
  }
 +
 +static void vlv_write_infoframe(struct drm_encoder *encoder,
 +                                   struct dip_infoframe *frame)
 +{
 +      uint32_t *data = (uint32_t *)frame;
 +      struct drm_device *dev = encoder->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_crtc *crtc = encoder->crtc;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 +      unsigned i, len = DIP_HEADER_SIZE + frame->len;
 +      u32 flags, val = I915_READ(reg);
 +
 +      intel_wait_for_vblank(dev, intel_crtc->pipe);
 +
 +      flags = intel_infoframe_index(frame);
 +
 +      val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
 +
 +      I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
 +
 +      for (i = 0; i < len; i += 4) {
 +              I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
 +              data++;
 +      }
 +
 +      flags |= intel_infoframe_flags(frame);
 +
 +      I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
 +}
 +
  static void intel_set_infoframe(struct drm_encoder *encoder,
                                struct dip_infoframe *frame)
  {
@@@ -365,8 -334,7 +365,8 @@@ intel_hdmi_detect(struct drm_connector 
        intel_hdmi->has_hdmi_sink = false;
        intel_hdmi->has_audio = false;
        edid = drm_get_edid(connector,
 -                          &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
 +                          intel_gmbus_get_adapter(dev_priv,
 +                                                  intel_hdmi->ddc_bus));
  
        if (edid) {
                if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@@@ -399,8 -367,7 +399,8 @@@ static int intel_hdmi_get_modes(struct 
         */
  
        return intel_ddc_get_modes(connector,
 -                                 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
 +                                 intel_gmbus_get_adapter(dev_priv,
 +                                                         intel_hdmi->ddc_bus));
  }
  
  static bool
@@@ -412,8 -379,7 +412,8 @@@ intel_hdmi_detect_audio(struct drm_conn
        bool has_audio = false;
  
        edid = drm_get_edid(connector,
 -                          &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
 +                          intel_gmbus_get_adapter(dev_priv,
 +                                                  intel_hdmi->ddc_bus));
        if (edid) {
                if (edid->input & DRM_EDID_INPUT_DIGITAL)
                        has_audio = drm_detect_monitor_audio(edid);
@@@ -583,11 -549,7 +583,11 @@@ void intel_hdmi_init(struct drm_device 
        if (!HAS_PCH_SPLIT(dev)) {
                intel_hdmi->write_infoframe = i9xx_write_infoframe;
                I915_WRITE(VIDEO_DIP_CTL, 0);
 -      } else {
 +      } else if (IS_VALLEYVIEW(dev)) {
 +              intel_hdmi->write_infoframe = vlv_write_infoframe;
 +              for_each_pipe(i)
 +                      I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
 +      }  else {
                intel_hdmi->write_infoframe = ironlake_write_infoframe;
                for_each_pipe(i)
                        I915_WRITE(TVIDEO_DIP_CTL(i), 0);
index 17a4630cec8a9df19c491037d14089db2aba2f34,9c71183629c2a08fa1e9bddae13cd12dde26429f..9dee82350defb0590ecc16326a4cdd712d15b0e4
@@@ -187,6 -187,8 +187,8 @@@ centre_horizontally(struct drm_display_
  
        mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
        mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
+       mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
  }
  
  static void
@@@ -208,6 -210,8 +210,8 @@@ centre_vertically(struct drm_display_mo
  
        mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
        mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
+       mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
  }
  
  static inline u32 panel_fitter_scaling(u32 source, u32 target)
@@@ -283,6 -287,8 +287,8 @@@ static bool intel_lvds_mode_fixup(struc
        for_each_pipe(pipe)
                I915_WRITE(BCLRPAT(pipe), 0);
  
+       drm_mode_set_crtcinfo(adjusted_mode, 0);
        switch (intel_lvds->fitting_mode) {
        case DRM_MODE_SCALE_CENTER:
                /*
@@@ -474,7 -480,7 +480,7 @@@ static int intel_lvds_get_modes(struct 
  
  static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
  {
 -      DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
 +      DRM_INFO("Skipping forced modeset for %s\n", id->ident);
        return 1;
  }
  
@@@ -622,7 -628,7 +628,7 @@@ static const struct drm_encoder_funcs i
  
  static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
  {
 -      DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
 +      DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
        return 1;
  }
  
@@@ -744,7 -750,7 +750,7 @@@ static const struct dmi_system_id intel
                .ident = "Hewlett-Packard t5745",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-                       DMI_MATCH(DMI_BOARD_NAME, "hp t5745"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
                },
        },
        {
                .ident = "Hewlett-Packard st5747",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-                       DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
                },
        },
        {
@@@ -845,8 -851,8 +851,8 @@@ static bool lvds_is_present_in_vbt(stru
                    child->device_type != DEVICE_TYPE_LFP)
                        continue;
  
 -              if (child->i2c_pin)
 -                  *i2c_pin = child->i2c_pin;
 +              if (intel_gmbus_is_port_valid(child->i2c_pin))
 +                      *i2c_pin = child->i2c_pin;
  
                /* However, we cannot trust the BIOS writers to populate
                 * the VBT correctly.  Since LVDS requires additional
@@@ -987,8 -993,7 +993,8 @@@ bool intel_lvds_init(struct drm_device 
         * preferred mode is the right one.
         */
        intel_lvds->edid = drm_get_edid(connector,
 -                                      &dev_priv->gmbus[pin].adapter);
 +                                      intel_gmbus_get_adapter(dev_priv,
 +                                                              pin));
        if (intel_lvds->edid) {
                if (drm_add_edid_modes(connector,
                                       intel_lvds->edid)) {
index cad45ff8251b004a7484c2efa4664b3ec64b77c7,48177ec4720ed14bae9bc4cb2bdbc0a2d06e4985..2b2e011e9055f7f9b9bdb9fc066410caaed4d68e
@@@ -28,9 -28,6 +28,9 @@@
   *      Chris Wilson <chris@chris-wilson.co.uk>
   */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/moduleparam.h>
  #include "intel_drv.h"
  
  #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
@@@ -50,8 -47,6 +50,6 @@@ intel_fixed_panel_mode(struct drm_displ
        adjusted_mode->vtotal = fixed_mode->vtotal;
  
        adjusted_mode->clock = fixed_mode->clock;
-       drm_mode_set_crtcinfo(adjusted_mode, 0);
  }
  
  /* adjusted_mode has been preset to be the panel's fixed mode */
@@@ -174,7 -169,7 +172,7 @@@ u32 intel_panel_get_max_backlight(struc
                /* XXX add code here to query mode clock or hardware clock
                 * and program max PWM appropriately.
                 */
 -              printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
 +              pr_warn_once("fixme: max PWM is zero\n");
                return 1;
        }
  
        return max;
  }
  
 +static int i915_panel_invert_brightness;
 +MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
 +      "(-1 force normal, 0 machine defaults, 1 force inversion), please "
 +      "report PCI device ID, subsystem vendor and subsystem device ID "
 +      "to dri-devel@lists.freedesktop.org, if your machine needs it. "
 +      "It will then be included in an upcoming module version.");
 +module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
 +static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      if (i915_panel_invert_brightness < 0)
 +              return val;
 +
 +      if (i915_panel_invert_brightness > 0 ||
 +          dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
 +              return intel_panel_get_max_backlight(dev) - val;
 +
 +      return val;
 +}
 +
  u32 intel_panel_get_backlight(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
                }
        }
  
 +      val = intel_panel_compute_brightness(dev, val);
        DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
        return val;
  }
@@@ -253,7 -226,6 +251,7 @@@ static void intel_panel_actually_set_ba
        u32 tmp;
  
        DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
 +      level = intel_panel_compute_brightness(dev, level);
  
        if (HAS_PCH_SPLIT(dev))
                return intel_pch_panel_set_backlight(dev, level);
index 12d9bc789dfbe0ef3b269f548c66efc68a3140bf,80fce51e2f439d9bf4825fffadf05051fb6531b9..b5ef7c145ee5af4a00a1f38d8c66b9c9a4e7bbc0
@@@ -53,35 -53,9 +53,35 @@@ static inline int ring_space(struct int
  }
  
  static int
 -render_ring_flush(struct intel_ring_buffer *ring,
 -                u32   invalidate_domains,
 -                u32   flush_domains)
 +gen2_render_ring_flush(struct intel_ring_buffer *ring,
 +                     u32      invalidate_domains,
 +                     u32      flush_domains)
 +{
 +      u32 cmd;
 +      int ret;
 +
 +      cmd = MI_FLUSH;
 +      if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
 +              cmd |= MI_NO_WRITE_FLUSH;
 +
 +      if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
 +              cmd |= MI_READ_FLUSH;
 +
 +      ret = intel_ring_begin(ring, 2);
 +      if (ret)
 +              return ret;
 +
 +      intel_ring_emit(ring, cmd);
 +      intel_ring_emit(ring, MI_NOOP);
 +      intel_ring_advance(ring);
 +
 +      return 0;
 +}
 +
 +static int
 +gen4_render_ring_flush(struct intel_ring_buffer *ring,
 +                     u32      invalidate_domains,
 +                     u32      flush_domains)
  {
        struct drm_device *dev = ring->dev;
        u32 cmd;
         */
  
        cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
 -      if ((invalidate_domains|flush_domains) &
 -          I915_GEM_DOMAIN_RENDER)
 +      if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
                cmd &= ~MI_NO_WRITE_FLUSH;
 -      if (INTEL_INFO(dev)->gen < 4) {
 -              /*
 -               * On the 965, the sampler cache always gets flushed
 -               * and this bit is reserved.
 -               */
 -              if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
 -                      cmd |= MI_READ_FLUSH;
 -      }
        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;
  
@@@ -307,9 -290,9 +307,9 @@@ static int init_ring_common(struct inte
                        | RING_VALID);
  
        /* If the head is still not zero, the ring is dead */
 -      if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
 -          I915_READ_START(ring) != obj->gtt_offset ||
 -          (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
 +      if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
 +                   I915_READ_START(ring) == obj->gtt_offset &&
 +                   (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
@@@ -418,6 -401,14 +418,14 @@@ static int init_render_ring(struct inte
        if (INTEL_INFO(dev)->gen >= 6) {
                I915_WRITE(INSTPM,
                           INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
+               /* From the Sandybridge PRM, volume 1 part 3, page 24:
+                * "If this bit is set, STCunit will have LRA as replacement
+                *  policy. [...] This bit must be reset.  LRA replacement
+                *  policy is not supported."
+                */
+               I915_WRITE(CACHE_MODE_0,
+                          CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
        }
  
        return ret;
@@@ -489,30 -480,21 +497,30 @@@ gen6_add_request(struct intel_ring_buff
   * @seqno - seqno which the waiter will block on
   */
  static int
 -intel_ring_sync(struct intel_ring_buffer *waiter,
 -              struct intel_ring_buffer *signaller,
 -              int ring,
 -              u32 seqno)
 +gen6_ring_sync(struct intel_ring_buffer *waiter,
 +             struct intel_ring_buffer *signaller,
 +             u32 seqno)
  {
        int ret;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;
  
 +      /* Throughout all of the GEM code, seqno passed implies our current
 +       * seqno is >= the last seqno executed. However for hardware the
 +       * comparison is strictly greater than.
 +       */
 +      seqno -= 1;
 +
 +      WARN_ON(signaller->semaphore_register[waiter->id] ==
 +              MI_SEMAPHORE_SYNC_INVALID);
 +
        ret = intel_ring_begin(waiter, 4);
        if (ret)
                return ret;
  
 -      intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
 +      intel_ring_emit(waiter,
 +                      dw1 | signaller->semaphore_register[waiter->id]);
        intel_ring_emit(waiter, seqno);
        intel_ring_emit(waiter, 0);
        intel_ring_emit(waiter, MI_NOOP);
        return 0;
  }
  
 -/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
 -int
 -render_ring_sync_to(struct intel_ring_buffer *waiter,
 -                  struct intel_ring_buffer *signaller,
 -                  u32 seqno)
 -{
 -      WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
 -      return intel_ring_sync(waiter,
 -                             signaller,
 -                             RCS,
 -                             seqno);
 -}
 -
 -/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
 -int
 -gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
 -                    struct intel_ring_buffer *signaller,
 -                    u32 seqno)
 -{
 -      WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
 -      return intel_ring_sync(waiter,
 -                             signaller,
 -                             VCS,
 -                             seqno);
 -}
 -
 -/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
 -int
 -gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
 -                    struct intel_ring_buffer *signaller,
 -                    u32 seqno)
 -{
 -      WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
 -      return intel_ring_sync(waiter,
 -                             signaller,
 -                             BCS,
 -                             seqno);
 -}
 -
 -
 -
  #define PIPE_CONTROL_FLUSH(ring__, addr__)                                    \
  do {                                                                  \
        intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
@@@ -582,6 -605,27 +590,6 @@@ pc_render_add_request(struct intel_ring
        return 0;
  }
  
 -static int
 -render_ring_add_request(struct intel_ring_buffer *ring,
 -                      u32 *result)
 -{
 -      u32 seqno = i915_gem_next_request_seqno(ring);
 -      int ret;
 -
 -      ret = intel_ring_begin(ring, 4);
 -      if (ret)
 -              return ret;
 -
 -      intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 -      intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 -      intel_ring_emit(ring, seqno);
 -      intel_ring_emit(ring, MI_USER_INTERRUPT);
 -      intel_ring_advance(ring);
 -
 -      *result = seqno;
 -      return 0;
 -}
 -
  static u32
  gen6_ring_get_seqno(struct intel_ring_buffer *ring)
  {
@@@ -608,43 -652,40 +616,43 @@@ pc_render_get_seqno(struct intel_ring_b
        return pc->cpu_page[0];
  }
  
 -static void
 -ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
 +static bool
 +gen5_ring_get_irq(struct intel_ring_buffer *ring)
  {
 -      dev_priv->gt_irq_mask &= ~mask;
 -      I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 -      POSTING_READ(GTIMR);
 -}
 +      struct drm_device *dev = ring->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
  
 -static void
 -ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 -{
 -      dev_priv->gt_irq_mask |= mask;
 -      I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 -      POSTING_READ(GTIMR);
 -}
 +      if (!dev->irq_enabled)
 +              return false;
  
 -static void
 -i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
 -{
 -      dev_priv->irq_mask &= ~mask;
 -      I915_WRITE(IMR, dev_priv->irq_mask);
 -      POSTING_READ(IMR);
 +      spin_lock(&ring->irq_lock);
 +      if (ring->irq_refcount++ == 0) {
 +              dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 +              I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 +              POSTING_READ(GTIMR);
 +      }
 +      spin_unlock(&ring->irq_lock);
 +
 +      return true;
  }
  
  static void
 -i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 +gen5_ring_put_irq(struct intel_ring_buffer *ring)
  {
 -      dev_priv->irq_mask |= mask;
 -      I915_WRITE(IMR, dev_priv->irq_mask);
 -      POSTING_READ(IMR);
 +      struct drm_device *dev = ring->dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +
 +      spin_lock(&ring->irq_lock);
 +      if (--ring->irq_refcount == 0) {
 +              dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 +              I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 +              POSTING_READ(GTIMR);
 +      }
 +      spin_unlock(&ring->irq_lock);
  }
  
  static bool
 -render_ring_get_irq(struct intel_ring_buffer *ring)
 +i9xx_ring_get_irq(struct intel_ring_buffer *ring)
  {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
  
        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
 -              if (HAS_PCH_SPLIT(dev))
 -                      ironlake_enable_irq(dev_priv,
 -                                          GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 -              else
 -                      i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
 +              dev_priv->irq_mask &= ~ring->irq_enable_mask;
 +              I915_WRITE(IMR, dev_priv->irq_mask);
 +              POSTING_READ(IMR);
        }
        spin_unlock(&ring->irq_lock);
  
  }
  
  static void
 -render_ring_put_irq(struct intel_ring_buffer *ring)
 +i9xx_ring_put_irq(struct intel_ring_buffer *ring)
  {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
  
        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
 -              if (HAS_PCH_SPLIT(dev))
 -                      ironlake_disable_irq(dev_priv,
 -                                           GT_USER_INTERRUPT |
 -                                           GT_PIPE_NOTIFY);
 -              else
 -                      i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
 +              dev_priv->irq_mask |= ring->irq_enable_mask;
 +              I915_WRITE(IMR, dev_priv->irq_mask);
 +              POSTING_READ(IMR);
        }
        spin_unlock(&ring->irq_lock);
  }
@@@ -727,7 -773,7 +735,7 @@@ bsd_ring_flush(struct intel_ring_buffe
  }
  
  static int
 -ring_add_request(struct intel_ring_buffer *ring,
 +i9xx_add_request(struct intel_ring_buffer *ring,
                 u32 *result)
  {
        u32 seqno;
  }
  
  static bool
 -gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 +gen6_ring_get_irq(struct intel_ring_buffer *ring)
  {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
  
        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
 -              ring->irq_mask &= ~rflag;
 -              I915_WRITE_IMR(ring, ring->irq_mask);
 -              ironlake_enable_irq(dev_priv, gflag);
 +              I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 +              dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 +              I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 +              POSTING_READ(GTIMR);
        }
        spin_unlock(&ring->irq_lock);
  
  }
  
  static void
 -gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 +gen6_ring_put_irq(struct intel_ring_buffer *ring)
  {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
  
        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
 -              ring->irq_mask |= rflag;
 -              I915_WRITE_IMR(ring, ring->irq_mask);
 -              ironlake_disable_irq(dev_priv, gflag);
 +              I915_WRITE_IMR(ring, ~0);
 +              dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 +              I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 +              POSTING_READ(GTIMR);
        }
        spin_unlock(&ring->irq_lock);
  
        gen6_gt_force_wake_put(dev_priv);
  }
  
 -static bool
 -bsd_ring_get_irq(struct intel_ring_buffer *ring)
 +static int
 +i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
  {
 -      struct drm_device *dev = ring->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -
 -      if (!dev->irq_enabled)
 -              return false;
 +      int ret;
  
 -      spin_lock(&ring->irq_lock);
 -      if (ring->irq_refcount++ == 0) {
 -              if (IS_G4X(dev))
 -                      i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
 -              else
 -                      ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
 -      }
 -      spin_unlock(&ring->irq_lock);
 +      ret = intel_ring_begin(ring, 2);
 +      if (ret)
 +              return ret;
  
 -      return true;
 -}
 -static void
 -bsd_ring_put_irq(struct intel_ring_buffer *ring)
 -{
 -      struct drm_device *dev = ring->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      intel_ring_emit(ring,
 +                      MI_BATCH_BUFFER_START |
 +                      MI_BATCH_GTT |
 +                      MI_BATCH_NON_SECURE_I965);
 +      intel_ring_emit(ring, offset);
 +      intel_ring_advance(ring);
  
 -      spin_lock(&ring->irq_lock);
 -      if (--ring->irq_refcount == 0) {
 -              if (IS_G4X(dev))
 -                      i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
 -              else
 -                      ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
 -      }
 -      spin_unlock(&ring->irq_lock);
 +      return 0;
  }
  
  static int
 -ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 +i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 +                              u32 offset, u32 len)
  {
        int ret;
  
 -      ret = intel_ring_begin(ring, 2);
 +      ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;
  
 -      intel_ring_emit(ring,
 -                      MI_BATCH_BUFFER_START | (2 << 6) |
 -                      MI_BATCH_NON_SECURE_I965);
 -      intel_ring_emit(ring, offset);
 +      intel_ring_emit(ring, MI_BATCH_BUFFER);
 +      intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 +      intel_ring_emit(ring, offset + len - 8);
 +      intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
  
        return 0;
  }
  
  static int
 -render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 +i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                u32 offset, u32 len)
  {
 -      struct drm_device *dev = ring->dev;
        int ret;
  
 -      if (IS_I830(dev) || IS_845G(dev)) {
 -              ret = intel_ring_begin(ring, 4);
 -              if (ret)
 -                      return ret;
 -
 -              intel_ring_emit(ring, MI_BATCH_BUFFER);
 -              intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 -              intel_ring_emit(ring, offset + len - 8);
 -              intel_ring_emit(ring, 0);
 -      } else {
 -              ret = intel_ring_begin(ring, 2);
 -              if (ret)
 -                      return ret;
 +      ret = intel_ring_begin(ring, 2);
 +      if (ret)
 +              return ret;
  
 -              if (INTEL_INFO(dev)->gen >= 4) {
 -                      intel_ring_emit(ring,
 -                                      MI_BATCH_BUFFER_START | (2 << 6) |
 -                                      MI_BATCH_NON_SECURE_I965);
 -                      intel_ring_emit(ring, offset);
 -              } else {
 -                      intel_ring_emit(ring,
 -                                      MI_BATCH_BUFFER_START | (2 << 6));
 -                      intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 -              }
 -      }
 +      intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
 +      intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
        intel_ring_advance(ring);
  
        return 0;
@@@ -909,8 -989,8 +917,8 @@@ err
        return ret;
  }
  
 -int intel_init_ring_buffer(struct drm_device *dev,
 -                         struct intel_ring_buffer *ring)
 +static int intel_init_ring_buffer(struct drm_device *dev,
 +                                struct intel_ring_buffer *ring)
  {
        struct drm_i915_gem_object *obj;
        int ret;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);
 +      ring->size = 32 * PAGE_SIZE;
  
        init_waitqueue_head(&ring->irq_queue);
        spin_lock_init(&ring->irq_lock);
 -      ring->irq_mask = ~0;
  
        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(ring);
@@@ -1189,6 -1269,44 +1197,6 @@@ void intel_ring_advance(struct intel_ri
        ring->write_tail(ring, ring->tail);
  }
  
 -static const struct intel_ring_buffer render_ring = {
 -      .name                   = "render ring",
 -      .id                     = RCS,
 -      .mmio_base              = RENDER_RING_BASE,
 -      .size                   = 32 * PAGE_SIZE,
 -      .init                   = init_render_ring,
 -      .write_tail             = ring_write_tail,
 -      .flush                  = render_ring_flush,
 -      .add_request            = render_ring_add_request,
 -      .get_seqno              = ring_get_seqno,
 -      .irq_get                = render_ring_get_irq,
 -      .irq_put                = render_ring_put_irq,
 -      .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
 -      .cleanup                = render_ring_cleanup,
 -      .sync_to                = render_ring_sync_to,
 -      .semaphore_register     = {MI_SEMAPHORE_SYNC_INVALID,
 -                                 MI_SEMAPHORE_SYNC_RV,
 -                                 MI_SEMAPHORE_SYNC_RB},
 -      .signal_mbox            = {GEN6_VRSYNC, GEN6_BRSYNC},
 -};
 -
 -/* ring buffer for bit-stream decoder */
 -
 -static const struct intel_ring_buffer bsd_ring = {
 -      .name                   = "bsd ring",
 -      .id                     = VCS,
 -      .mmio_base              = BSD_RING_BASE,
 -      .size                   = 32 * PAGE_SIZE,
 -      .init                   = init_ring_common,
 -      .write_tail             = ring_write_tail,
 -      .flush                  = bsd_ring_flush,
 -      .add_request            = ring_add_request,
 -      .get_seqno              = ring_get_seqno,
 -      .irq_get                = bsd_ring_get_irq,
 -      .irq_put                = bsd_ring_put_irq,
 -      .dispatch_execbuffer    = ring_dispatch_execbuffer,
 -};
 -
  
  static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                     u32 value)
@@@ -1251,8 -1369,77 +1259,8 @@@ gen6_ring_dispatch_execbuffer(struct in
        return 0;
  }
  
 -static bool
 -gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
 -{
 -      return gen6_ring_get_irq(ring,
 -                               GT_USER_INTERRUPT,
 -                               GEN6_RENDER_USER_INTERRUPT);
 -}
 -
 -static void
 -gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
 -{
 -      return gen6_ring_put_irq(ring,
 -                               GT_USER_INTERRUPT,
 -                               GEN6_RENDER_USER_INTERRUPT);
 -}
 -
 -static bool
 -gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
 -{
 -      return gen6_ring_get_irq(ring,
 -                               GT_GEN6_BSD_USER_INTERRUPT,
 -                               GEN6_BSD_USER_INTERRUPT);
 -}
 -
 -static void
 -gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
 -{
 -      return gen6_ring_put_irq(ring,
 -                               GT_GEN6_BSD_USER_INTERRUPT,
 -                               GEN6_BSD_USER_INTERRUPT);
 -}
 -
 -/* ring buffer for Video Codec for Gen6+ */
 -static const struct intel_ring_buffer gen6_bsd_ring = {
 -      .name                   = "gen6 bsd ring",
 -      .id                     = VCS,
 -      .mmio_base              = GEN6_BSD_RING_BASE,
 -      .size                   = 32 * PAGE_SIZE,
 -      .init                   = init_ring_common,
 -      .write_tail             = gen6_bsd_ring_write_tail,
 -      .flush                  = gen6_ring_flush,
 -      .add_request            = gen6_add_request,
 -      .get_seqno              = gen6_ring_get_seqno,
 -      .irq_get                = gen6_bsd_ring_get_irq,
 -      .irq_put                = gen6_bsd_ring_put_irq,
 -      .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
 -      .sync_to                = gen6_bsd_ring_sync_to,
 -      .semaphore_register     = {MI_SEMAPHORE_SYNC_VR,
 -                                 MI_SEMAPHORE_SYNC_INVALID,
 -                                 MI_SEMAPHORE_SYNC_VB},
 -      .signal_mbox            = {GEN6_RVSYNC, GEN6_BVSYNC},
 -};
 -
  /* Blitter support (SandyBridge+) */
  
 -static bool
 -blt_ring_get_irq(struct intel_ring_buffer *ring)
 -{
 -      return gen6_ring_get_irq(ring,
 -                               GT_BLT_USER_INTERRUPT,
 -                               GEN6_BLITTER_USER_INTERRUPT);
 -}
 -
 -static void
 -blt_ring_put_irq(struct intel_ring_buffer *ring)
 -{
 -      gen6_ring_put_irq(ring,
 -                        GT_BLT_USER_INTERRUPT,
 -                        GEN6_BLITTER_USER_INTERRUPT);
 -}
 -
  static int blt_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate, u32 flush)
  {
        return 0;
  }
  
 -static const struct intel_ring_buffer gen6_blt_ring = {
 -      .name                   = "blt ring",
 -      .id                     = BCS,
 -      .mmio_base              = BLT_RING_BASE,
 -      .size                   = 32 * PAGE_SIZE,
 -      .init                   = init_ring_common,
 -      .write_tail             = ring_write_tail,
 -      .flush                  = blt_ring_flush,
 -      .add_request            = gen6_add_request,
 -      .get_seqno              = gen6_ring_get_seqno,
 -      .irq_get                = blt_ring_get_irq,
 -      .irq_put                = blt_ring_put_irq,
 -      .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
 -      .sync_to                = gen6_blt_ring_sync_to,
 -      .semaphore_register     = {MI_SEMAPHORE_SYNC_BR,
 -                                 MI_SEMAPHORE_SYNC_BV,
 -                                 MI_SEMAPHORE_SYNC_INVALID},
 -      .signal_mbox            = {GEN6_RBSYNC, GEN6_VBSYNC},
 -};
 -
  int intel_init_render_ring_buffer(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
  
 -      *ring = render_ring;
 +      ring->name = "render ring";
 +      ring->id = RCS;
 +      ring->mmio_base = RENDER_RING_BASE;
 +
        if (INTEL_INFO(dev)->gen >= 6) {
                ring->add_request = gen6_add_request;
                ring->flush = gen6_render_ring_flush;
 -              ring->irq_get = gen6_render_ring_get_irq;
 -              ring->irq_put = gen6_render_ring_put_irq;
 +              ring->irq_get = gen6_ring_get_irq;
 +              ring->irq_put = gen6_ring_put_irq;
 +              ring->irq_enable_mask = GT_USER_INTERRUPT;
                ring->get_seqno = gen6_ring_get_seqno;
 +              ring->sync_to = gen6_ring_sync;
 +              ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
 +              ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
 +              ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
 +              ring->signal_mbox[0] = GEN6_VRSYNC;
 +              ring->signal_mbox[1] = GEN6_BRSYNC;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
 +              ring->flush = gen4_render_ring_flush;
                ring->get_seqno = pc_render_get_seqno;
 +              ring->irq_get = gen5_ring_get_irq;
 +              ring->irq_put = gen5_ring_put_irq;
 +              ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 +      } else {
 +              ring->add_request = i9xx_add_request;
 +              if (INTEL_INFO(dev)->gen < 4)
 +                      ring->flush = gen2_render_ring_flush;
 +              else
 +                      ring->flush = gen4_render_ring_flush;
 +              ring->get_seqno = ring_get_seqno;
 +              ring->irq_get = i9xx_ring_get_irq;
 +              ring->irq_put = i9xx_ring_put_irq;
 +              ring->irq_enable_mask = I915_USER_INTERRUPT;
        }
 +      ring->write_tail = ring_write_tail;
 +      if (INTEL_INFO(dev)->gen >= 6)
 +              ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 +      else if (INTEL_INFO(dev)->gen >= 4)
 +              ring->dispatch_execbuffer = i965_dispatch_execbuffer;
 +      else if (IS_I830(dev) || IS_845G(dev))
 +              ring->dispatch_execbuffer = i830_dispatch_execbuffer;
 +      else
 +              ring->dispatch_execbuffer = i915_dispatch_execbuffer;
 +      ring->init = init_render_ring;
 +      ring->cleanup = render_ring_cleanup;
 +
  
        if (!I915_NEED_GFX_HWS(dev)) {
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@@ -1340,37 -1511,16 +1348,37 @@@ int intel_render_ring_init_dri(struct d
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
  
 -      *ring = render_ring;
 +      ring->name = "render ring";
 +      ring->id = RCS;
 +      ring->mmio_base = RENDER_RING_BASE;
 +
        if (INTEL_INFO(dev)->gen >= 6) {
 -              ring->add_request = gen6_add_request;
 -              ring->irq_get = gen6_render_ring_get_irq;
 -              ring->irq_put = gen6_render_ring_put_irq;
 -      } else if (IS_GEN5(dev)) {
 -              ring->add_request = pc_render_add_request;
 -              ring->get_seqno = pc_render_get_seqno;
 +              /* non-kms not supported on gen6+ */
 +              return -ENODEV;
        }
  
 +      /* Note: gem is not supported on gen5/ilk without kms (the corresponding
 +       * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
 +       * the special gen5 functions. */
 +      ring->add_request = i9xx_add_request;
 +      if (INTEL_INFO(dev)->gen < 4)
 +              ring->flush = gen2_render_ring_flush;
 +      else
 +              ring->flush = gen4_render_ring_flush;
 +      ring->get_seqno = ring_get_seqno;
 +      ring->irq_get = i9xx_ring_get_irq;
 +      ring->irq_put = i9xx_ring_put_irq;
 +      ring->irq_enable_mask = I915_USER_INTERRUPT;
 +      ring->write_tail = ring_write_tail;
 +      if (INTEL_INFO(dev)->gen >= 4)
 +              ring->dispatch_execbuffer = i965_dispatch_execbuffer;
 +      else if (IS_I830(dev) || IS_845G(dev))
 +              ring->dispatch_execbuffer = i830_dispatch_execbuffer;
 +      else
 +              ring->dispatch_execbuffer = i915_dispatch_execbuffer;
 +      ring->init = init_render_ring;
 +      ring->cleanup = render_ring_cleanup;
 +
        if (!I915_NEED_GFX_HWS(dev))
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
  
@@@ -1406,46 -1556,10 +1414,46 @@@ int intel_init_bsd_ring_buffer(struct d
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
  
 -      if (IS_GEN6(dev) || IS_GEN7(dev))
 -              *ring = gen6_bsd_ring;
 -      else
 -              *ring = bsd_ring;
 +      ring->name = "bsd ring";
 +      ring->id = VCS;
 +
 +      ring->write_tail = ring_write_tail;
 +      if (IS_GEN6(dev) || IS_GEN7(dev)) {
 +              ring->mmio_base = GEN6_BSD_RING_BASE;
 +              /* gen6 bsd needs a special wa for tail updates */
 +              if (IS_GEN6(dev))
 +                      ring->write_tail = gen6_bsd_ring_write_tail;
 +              ring->flush = gen6_ring_flush;
 +              ring->add_request = gen6_add_request;
 +              ring->get_seqno = gen6_ring_get_seqno;
 +              ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
 +              ring->irq_get = gen6_ring_get_irq;
 +              ring->irq_put = gen6_ring_put_irq;
 +              ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 +              ring->sync_to = gen6_ring_sync;
 +              ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
 +              ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
 +              ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
 +              ring->signal_mbox[0] = GEN6_RVSYNC;
 +              ring->signal_mbox[1] = GEN6_BVSYNC;
 +      } else {
 +              ring->mmio_base = BSD_RING_BASE;
 +              ring->flush = bsd_ring_flush;
 +              ring->add_request = i9xx_add_request;
 +              ring->get_seqno = ring_get_seqno;
 +              if (IS_GEN5(dev)) {
 +                      ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 +                      ring->irq_get = gen5_ring_get_irq;
 +                      ring->irq_put = gen5_ring_put_irq;
 +              } else {
 +                      ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
 +                      ring->irq_get = i9xx_ring_get_irq;
 +                      ring->irq_put = i9xx_ring_put_irq;
 +              }
 +              ring->dispatch_execbuffer = i965_dispatch_execbuffer;
 +      }
 +      ring->init = init_ring_common;
 +
  
        return intel_init_ring_buffer(dev, ring);
  }
@@@ -1455,25 -1569,7 +1463,25 @@@ int intel_init_blt_ring_buffer(struct d
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
  
 -      *ring = gen6_blt_ring;
 +      ring->name = "blitter ring";
 +      ring->id = BCS;
 +
 +      ring->mmio_base = BLT_RING_BASE;
 +      ring->write_tail = ring_write_tail;
 +      ring->flush = blt_ring_flush;
 +      ring->add_request = gen6_add_request;
 +      ring->get_seqno = gen6_ring_get_seqno;
 +      ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
 +      ring->irq_get = gen6_ring_get_irq;
 +      ring->irq_put = gen6_ring_put_irq;
 +      ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 +      ring->sync_to = gen6_ring_sync;
 +      ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
 +      ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
 +      ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
 +      ring->signal_mbox[0] = GEN6_RBSYNC;
 +      ring->signal_mbox[1] = GEN6_VBSYNC;
 +      ring->init = init_ring_common;
  
        return intel_init_ring_buffer(dev, ring);
  }
index c330efd59a0e58be5e37a092b617b2851fc64947,232d77d07d8b241b7ea1ec45ee8333463ac63c3a..3d9dfa57130bf3b20fb4c37158f9f1724f9eb94e
@@@ -41,7 -41,7 +41,7 @@@
  #define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
  #define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
  #define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
 -#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
 +#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
  
  #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
                        SDVO_TV_MASK)
@@@ -74,7 -74,7 +74,7 @@@ struct intel_sdvo 
        struct i2c_adapter ddc;
  
        /* Register for the SDVO device: SDVOB or SDVOC */
 -      int sdvo_reg;
 +      uint32_t sdvo_reg;
  
        /* Active outputs controlled by this SDVO output */
        uint16_t controlled_output;
         */
        bool is_tv;
  
 +      /* On different gens SDVOB is at different places. */
 +      bool is_sdvob;
 +
        /* This is for current tv format name */
        int tv_format_index;
  
@@@ -406,7 -403,8 +406,7 @@@ static const struct _sdvo_cmd_name 
        SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
  };
  
 -#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
 -#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
 +#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
  
  static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
                                   const void *args, int args_len)
@@@ -443,17 -441,9 +443,17 @@@ static const char *cmd_status_names[] 
  static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
                                 const void *args, int args_len)
  {
 -      u8 buf[args_len*2 + 2], status;
 -      struct i2c_msg msgs[args_len + 3];
 -      int i, ret;
 +      u8 *buf, status;
 +      struct i2c_msg *msgs;
 +      int i, ret = true;
 +
 +      buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
 +      if (!buf)
 +              return false;
 +
 +      msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
 +      if (!msgs)
 +              return false;
  
        intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
  
        ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
        if (ret < 0) {
                DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
 -              return false;
 +              ret = false;
 +              goto out;
        }
        if (ret != i+3) {
                /* failure in I2C transfer */
                DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
 -              return false;
 +              ret = false;
        }
  
 -      return true;
 +out:
 +      kfree(msgs);
 +      kfree(buf);
 +      return ret;
  }
  
  static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
@@@ -745,6 -731,7 +745,7 @@@ static void intel_sdvo_get_dtd_from_mod
        uint16_t width, height;
        uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
        uint16_t h_sync_offset, v_sync_offset;
+       int mode_clock;
  
        width = mode->crtc_hdisplay;
        height = mode->crtc_vdisplay;
        h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
        v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
  
-       dtd->part1.clock = mode->clock / 10;
+       mode_clock = mode->clock;
+       mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
+       mode_clock /= 10;
+       dtd->part1.clock = mode_clock;
        dtd->part1.h_active = width & 0xff;
        dtd->part1.h_blank = h_blank_len & 0xff;
        dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
@@@ -1010,7 -1001,7 +1015,7 @@@ static void intel_sdvo_mode_set(struct 
        struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
        u32 sdvox;
        struct intel_sdvo_in_out_map in_out;
-       struct intel_sdvo_dtd input_dtd;
+       struct intel_sdvo_dtd input_dtd, output_dtd;
        int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
        int rate;
  
                                          intel_sdvo->attached_output))
                return;
  
-       /* We have tried to get input timing in mode_fixup, and filled into
-        * adjusted_mode.
-        */
-       if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
-               input_dtd = intel_sdvo->input_dtd;
-       } else {
-               /* Set the output timing to the screen */
-               if (!intel_sdvo_set_target_output(intel_sdvo,
-                                                 intel_sdvo->attached_output))
-                       return;
-               intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
-               (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
-       }
+       /* lvds has a special fixed output timing. */
+       if (intel_sdvo->is_lvds)
+               intel_sdvo_get_dtd_from_mode(&output_dtd,
+                                            intel_sdvo->sdvo_lvds_fixed_mode);
+       else
+               intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+       (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
  
        /* Set the input timing to the screen. Assume always input 0. */
        if (!intel_sdvo_set_target_input(intel_sdvo))
            !intel_sdvo_set_tv_format(intel_sdvo))
                return;
  
+       /* We have tried to get input timing in mode_fixup, and filled into
+        * adjusted_mode.
+        */
+       intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
        (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
  
        switch (pixel_multiplier) {
@@@ -1266,11 -1254,10 +1268,11 @@@ intel_sdvo_get_analog_edid(struct drm_c
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
  
        return drm_get_edid(connector,
 -                          &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
 +                          intel_gmbus_get_adapter(dev_priv,
 +                                                  dev_priv->crt_ddc_pin));
  }
  
 -enum drm_connector_status
 +static enum drm_connector_status
  intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
  {
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
@@@ -1356,7 -1343,8 +1358,7 @@@ intel_sdvo_detect(struct drm_connector 
                return connector_status_unknown;
  
        /* add 30ms delay when the output type might be TV */
 -      if (intel_sdvo->caps.output_flags &
 -          (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
 +      if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
                mdelay(30);
  
        if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
@@@ -1907,7 -1895,7 +1909,7 @@@ intel_sdvo_select_ddc_bus(struct drm_i9
  {
        struct sdvo_device_mapping *mapping;
  
 -      if (IS_SDVOB(reg))
 +      if (sdvo->is_sdvob)
                mapping = &(dev_priv->sdvo_mappings[0]);
        else
                mapping = &(dev_priv->sdvo_mappings[1]);
@@@ -1925,7 -1913,7 +1927,7 @@@ intel_sdvo_select_i2c_bus(struct drm_i9
        struct sdvo_device_mapping *mapping;
        u8 pin;
  
 -      if (IS_SDVOB(reg))
 +      if (sdvo->is_sdvob)
                mapping = &dev_priv->sdvo_mappings[0];
        else
                mapping = &dev_priv->sdvo_mappings[1];
        if (mapping->initialized)
                pin = mapping->i2c_pin;
  
 -      if (pin < GMBUS_NUM_PORTS) {
 -              sdvo->i2c = &dev_priv->gmbus[pin].adapter;
 +      if (intel_gmbus_is_port_valid(pin)) {
 +              sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
                intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
                intel_gmbus_force_bit(sdvo->i2c, true);
        } else {
 -              sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
 +              sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
        }
  }
  
@@@ -1950,12 -1938,12 +1952,12 @@@ intel_sdvo_is_hdmi_connector(struct int
  }
  
  static u8
 -intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
 +intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct sdvo_device_mapping *my_mapping, *other_mapping;
  
 -      if (IS_SDVOB(sdvo_reg)) {
 +      if (sdvo->is_sdvob) {
                my_mapping = &dev_priv->sdvo_mappings[0];
                other_mapping = &dev_priv->sdvo_mappings[1];
        } else {
        /* No SDVO device info is found for another DVO port,
         * so use mapping assumption we had before BIOS parsing.
         */
 -      if (IS_SDVOB(sdvo_reg))
 +      if (sdvo->is_sdvob)
                return 0x70;
        else
                return 0x72;
@@@ -2205,10 -2193,6 +2207,10 @@@ intel_sdvo_output_setup(struct intel_sd
                if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
                        return false;
  
 +      if (flags & SDVO_OUTPUT_YPRPB0)
 +              if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
 +                      return false;
 +
        if (flags & SDVO_OUTPUT_RGB0)
                if (!intel_sdvo_analog_init(intel_sdvo, 0))
                        return false;
@@@ -2500,7 -2484,7 +2502,7 @@@ intel_sdvo_init_ddc_proxy(struct intel_
        return i2c_add_adapter(&sdvo->ddc) == 0;
  }
  
 -bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 +bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *intel_encoder;
                return false;
  
        intel_sdvo->sdvo_reg = sdvo_reg;
 -      intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
 +      intel_sdvo->is_sdvob = is_sdvob;
 +      intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
        intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
        if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
                kfree(intel_sdvo);
                u8 byte;
  
                if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
 -                      DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
 -                                    IS_SDVOB(sdvo_reg) ? 'B' : 'C');
 +                      DRM_DEBUG_KMS("No SDVO device found on %s\n",
 +                                    SDVO_NAME(intel_sdvo));
                        goto err;
                }
        }
  
 -      if (IS_SDVOB(sdvo_reg))
 +      if (intel_sdvo->is_sdvob)
                dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
        else
                dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
  
        if (intel_sdvo_output_setup(intel_sdvo,
                                    intel_sdvo->caps.output_flags) != true) {
 -              DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
 -                            IS_SDVOB(sdvo_reg) ? 'B' : 'C');
 +              DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
 +                            SDVO_NAME(intel_sdvo));
                goto err;
        }
  
index 2fab38f5a08e8f765167e44f17ce5d6691701eba,af1054f8202a27ac1059b00d0799ee00ebfb1da9..01d77d1554f4258899da5e14eb0044e3d529cc02
@@@ -575,6 -575,9 +575,9 @@@ static u32 atombios_adjust_pll(struct d
  
                if (rdev->family < CHIP_RV770)
                        pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+               /* use frac fb div on APUs */
+               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+                       pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
        } else {
                pll->flags |= RADEON_PLL_LEGACY;
  
                if (encoder->crtc == crtc) {
                        radeon_encoder = to_radeon_encoder(encoder);
                        connector = radeon_get_connector_for_encoder(encoder);
 -                      /* if (connector && connector->display_info.bpc)
 -                              bpc = connector->display_info.bpc; */
 +                      bpc = radeon_get_monitor_bpc(connector);
                        encoder_mode = atombios_get_encoder_mode(encoder);
                        is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
                        if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@@ -954,8 -958,8 +957,8 @@@ static void atombios_crtc_set_pll(struc
                break;
        }
  
-       if (radeon_encoder->active_device &
-           (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+       if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+           (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
                struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
                struct drm_connector *connector =
                        radeon_get_connector_for_encoder(encoder);
                struct radeon_connector_atom_dig *dig_connector =
                        radeon_connector->con_priv;
                int dp_clock;
 -
 -              /* if (connector->display_info.bpc)
 -                      bpc = connector->display_info.bpc; */
 +              bpc = radeon_get_monitor_bpc(connector);
  
                switch (encoder_mode) {
                case ATOM_ENCODER_MODE_DP_MST:
index 222245d0138a073efe7f7c72425f3a6d0ebecd94,c8187c4b6ae8838f65fdd0445be43974cc040f1b..c37b727fda71d0d843db15bf77d3d3c769f649d3
@@@ -1135,7 -1135,7 +1135,7 @@@ static void r600_vram_gtt_location(stru
        }
        if (rdev->flags & RADEON_IS_AGP) {
                size_bf = mc->gtt_start;
-               size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+               size_af = 0xFFFFFFFF - mc->gtt_end;
                if (size_bf > size_af) {
                        if (mc->mc_vram_size > size_bf) {
                                dev_warn(rdev->dev, "limiting VRAM\n");
                                mc->real_vram_size = size_af;
                                mc->mc_vram_size = size_af;
                        }
-                       mc->vram_start = mc->gtt_end;
+                       mc->vram_start = mc->gtt_end + 1;
                }
                mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
                dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
@@@ -2968,15 -2968,6 +2968,15 @@@ static void r600_disable_interrupt_stat
                        WREG32(DC_HPD5_INT_CONTROL, tmp);
                        tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
                        WREG32(DC_HPD6_INT_CONTROL, tmp);
 +                      tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 +                      WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
 +                      tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 +                      WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
 +              } else {
 +                      tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 +                      WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
 +                      tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 +                      WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
                }
        } else {
                WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
                WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
                tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
                WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
 +              tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 +              WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
 +              tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 +              WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
        }
  }
  
@@@ -3060,9 -3047,6 +3060,9 @@@ int r600_irq_init(struct radeon_device 
        else
                r600_disable_interrupt_state(rdev);
  
 +      /* at this point everything should be setup correctly to enable master */
 +      pci_set_master(rdev->pdev);
 +
        /* enable irqs */
        r600_enable_interrupts(rdev);
  
@@@ -3087,7 -3071,7 +3087,7 @@@ int r600_irq_set(struct radeon_device *
        u32 mode_int = 0;
        u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
        u32 grbm_int_cntl = 0;
 -      u32 hdmi1, hdmi2;
 +      u32 hdmi0, hdmi1;
        u32 d1grph = 0, d2grph = 0;
  
        if (!rdev->irq.installed) {
                return 0;
        }
  
 -      hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
        if (ASIC_IS_DCE3(rdev)) {
 -              hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
                hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
                hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
                hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
                if (ASIC_IS_DCE32(rdev)) {
                        hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
                        hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 +                      hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 +                      hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 +              } else {
 +                      hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 +                      hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
                }
        } else {
 -              hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
                hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
                hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
                hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
 +              hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
 +              hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
        }
  
        if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
                DRM_DEBUG("r600_irq_set: hpd 6\n");
                hpd6 |= DC_HPDx_INT_EN;
        }
 -      if (rdev->irq.hdmi[0]) {
 -              DRM_DEBUG("r600_irq_set: hdmi 1\n");
 -              hdmi1 |= R600_HDMI_INT_EN;
 +      if (rdev->irq.afmt[0]) {
 +              DRM_DEBUG("r600_irq_set: hdmi 0\n");
 +              hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
        }
 -      if (rdev->irq.hdmi[1]) {
 -              DRM_DEBUG("r600_irq_set: hdmi 2\n");
 -              hdmi2 |= R600_HDMI_INT_EN;
 +      if (rdev->irq.afmt[1]) {
 +              DRM_DEBUG("r600_irq_set: hdmi 0\n");
 +              hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
        }
        if (rdev->irq.gui_idle) {
                DRM_DEBUG("gui idle\n");
        WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
        WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 -      WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
        if (ASIC_IS_DCE3(rdev)) {
 -              WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
                WREG32(DC_HPD1_INT_CONTROL, hpd1);
                WREG32(DC_HPD2_INT_CONTROL, hpd2);
                WREG32(DC_HPD3_INT_CONTROL, hpd3);
                if (ASIC_IS_DCE32(rdev)) {
                        WREG32(DC_HPD5_INT_CONTROL, hpd5);
                        WREG32(DC_HPD6_INT_CONTROL, hpd6);
 +                      WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
 +                      WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
 +              } else {
 +                      WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
 +                      WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
                }
        } else {
 -              WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
                WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
                WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
                WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
 +              WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
 +              WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
        }
  
        return 0;
@@@ -3214,19 -3190,10 +3214,19 @@@ static void r600_irq_ack(struct radeon_
                rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
                rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
                rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
 +              if (ASIC_IS_DCE32(rdev)) {
 +                      rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
 +                      rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
 +              } else {
 +                      rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
 +                      rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
 +              }
        } else {
                rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
                rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
                rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
 +              rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
 +              rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
        }
        rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
        rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
                        tmp |= DC_HPDx_INT_ACK;
                        WREG32(DC_HPD6_INT_CONTROL, tmp);
                }
 -      }
 -      if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
 -              WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
 -      }
 -      if (ASIC_IS_DCE3(rdev)) {
 -              if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
 -                      WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
 +              if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
 +                      tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
 +                      tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
 +                      WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
 +              }
 +              if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
 +                      tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
 +                      tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
 +                      WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
                }
        } else {
 -              if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
 -                      WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
 +              if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
 +                      tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
 +                      tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
 +                      WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
 +              }
 +              if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
 +                      if (ASIC_IS_DCE3(rdev)) {
 +                              tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
 +                              tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
 +                              WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
 +                      } else {
 +                              tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
 +                              tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
 +                              WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
 +                      }
                }
        }
  }
@@@ -3393,7 -3345,6 +3393,7 @@@ int r600_irq_process(struct radeon_devi
        u32 ring_index;
        unsigned long flags;
        bool queue_hotplug = false;
 +      bool queue_hdmi = false;
  
        if (!rdev->ih.enabled || rdev->shutdown)
                return IRQ_NONE;
@@@ -3529,26 -3480,9 +3529,26 @@@ restart_ih
                                break;
                        }
                        break;
 -              case 21: /* HDMI */
 -                      DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
 -                      r600_audio_schedule_polling(rdev);
 +              case 21: /* hdmi */
 +                      switch (src_data) {
 +                      case 4:
 +                              if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
 +                                      rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
 +                                      queue_hdmi = true;
 +                                      DRM_DEBUG("IH: HDMI0\n");
 +                              }
 +                              break;
 +                      case 5:
 +                              if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
 +                                      rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
 +                                      queue_hdmi = true;
 +                                      DRM_DEBUG("IH: HDMI1\n");
 +                              }
 +                              break;
 +                      default:
 +                              DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
 +                              break;
 +                      }
                        break;
                case 176: /* CP_INT in ring buffer */
                case 177: /* CP_INT in IB1 */
                goto restart_ih;
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
 +      if (queue_hdmi)
 +              schedule_work(&rdev->audio_work);
        rdev->ih.rptr = rptr;
        WREG32(IH_RB_RPTR, rdev->ih.rptr);
        spin_unlock_irqrestore(&rdev->ih.lock, flags);
index 71fa389e10fe1ec3725a8fd6d46bc881bc5533e0,3c2e7a000a2ad91cefff66c5aa3dde40f3d9649d..2914c5761cfc7a8816f6dcc745aa17282385e626
@@@ -84,62 -84,6 +84,62 @@@ static void radeon_property_change_mode
                                         crtc->x, crtc->y, crtc->fb);
        }
  }
 +
 +int radeon_get_monitor_bpc(struct drm_connector *connector)
 +{
 +      struct drm_device *dev = connector->dev;
 +      struct radeon_device *rdev = dev->dev_private;
 +      struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 +      struct radeon_connector_atom_dig *dig_connector;
 +      int bpc = 8;
 +
 +      switch (connector->connector_type) {
 +      case DRM_MODE_CONNECTOR_DVII:
 +      case DRM_MODE_CONNECTOR_HDMIB:
 +              if (radeon_connector->use_digital) {
 +                      if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
 +                              if (connector->display_info.bpc)
 +                                      bpc = connector->display_info.bpc;
 +                      }
 +              }
 +              break;
 +      case DRM_MODE_CONNECTOR_DVID:
 +      case DRM_MODE_CONNECTOR_HDMIA:
 +              if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
 +                      if (connector->display_info.bpc)
 +                              bpc = connector->display_info.bpc;
 +              }
 +              break;
 +      case DRM_MODE_CONNECTOR_DisplayPort:
 +              dig_connector = radeon_connector->con_priv;
 +              if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 +                  (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
 +                  drm_detect_hdmi_monitor(radeon_connector->edid)) {
 +                      if (connector->display_info.bpc)
 +                              bpc = connector->display_info.bpc;
 +              }
 +              break;
 +      case DRM_MODE_CONNECTOR_eDP:
 +      case DRM_MODE_CONNECTOR_LVDS:
 +              if (connector->display_info.bpc)
 +                      bpc = connector->display_info.bpc;
 +              else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
 +                      struct drm_connector_helper_funcs *connector_funcs =
 +                              connector->helper_private;
 +                      struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
 +                      struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 +                      struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 +
 +                      if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
 +                              bpc = 6;
 +                      else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
 +                              bpc = 8;
 +              }
 +              break;
 +      }
 +      return bpc;
 +}
 +
  static void
  radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
  {
@@@ -1026,7 -970,7 +1026,7 @@@ radeon_dvi_detect(struct drm_connector 
  
                        encoder = obj_to_encoder(obj);
  
-                       if (encoder->encoder_type != DRM_MODE_ENCODER_DAC ||
+                       if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
                            encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
                                continue;
  
         * cases the DVI port is actually a virtual KVM port connected to the service
         * processor.
         */
+ out:
        if ((!rdev->is_atom_bios) &&
            (ret == connector_status_disconnected) &&
            rdev->mode_info.bios_hardcoded_edid_size) {
                ret = connector_status_connected;
        }
  
- out:
        /* updated in get modes as well since we need to know if it's analog or digital */
        radeon_connector_update_scratch_regs(connector, ret);
        return ret;
index 0fb4f8993caec4f07844452d35b1c27450692884,5992502a3448dc692e426891751822f26aec3350..76a118df04f9d27a7947c0efab63e3180969e441
@@@ -241,8 -241,8 +241,8 @@@ int radeon_wb_init(struct radeon_devic
                                rdev->wb.use_event = true;
                }
        }
-       /* always use writeback/events on NI */
-       if (ASIC_IS_DCE5(rdev)) {
+       /* always use writeback/events on NI, APUs */
+       if (rdev->family >= CHIP_PALM) {
                rdev->wb.enabled = true;
                rdev->wb.use_event = true;
        }
@@@ -955,6 -955,7 +955,6 @@@ int radeon_resume_kms(struct drm_devic
                console_unlock();
                return -1;
        }
 -      pci_set_master(dev->pdev);
        /* resume AGP if in use */
        radeon_agp_resume(rdev);
        radeon_resume(rdev);
index 170f1718d92a27db127c60583b180422ad027192,65060b77c8058efea3c7f35aac4df40b777aac4c..5df58d1aba06661cd706522f86b5e7a88407a350
@@@ -73,7 -73,6 +73,7 @@@ void radeon_driver_irq_preinstall_kms(s
        for (i = 0; i < RADEON_MAX_CRTCS; i++) {
                rdev->irq.crtc_vblank_int[i] = false;
                rdev->irq.pflip[i] = false;
 +              rdev->irq.afmt[i] = false;
        }
        radeon_irq_set(rdev);
        /* Clear bits */
@@@ -109,7 -108,6 +109,7 @@@ void radeon_driver_irq_uninstall_kms(st
        for (i = 0; i < RADEON_MAX_CRTCS; i++) {
                rdev->irq.crtc_vblank_int[i] = false;
                rdev->irq.pflip[i] = false;
 +              rdev->irq.afmt[i] = false;
        }
        radeon_irq_set(rdev);
  }
@@@ -149,6 -147,12 +149,12 @@@ static bool radeon_msi_ok(struct radeon
            (rdev->pdev->subsystem_device == 0x01fd))
                return true;
  
+       /* RV515 seems to have MSI issues where it loses
+        * MSI rearms occasionally. This leads to lockups and freezes.
+        * disable it by default.
+        */
+       if (rdev->family == CHIP_RV515)
+               return false;
        if (rdev->flags & RADEON_IS_IGP) {
                /* APUs work fine with MSIs */
                if (rdev->family >= CHIP_PALM)
@@@ -166,7 -170,6 +172,7 @@@ int radeon_irq_kms_init(struct radeon_d
        int r = 0;
  
        INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
 +      INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
  
        spin_lock_init(&rdev->irq.sw_lock);
        for (i = 0; i < rdev->num_crtc; i++)
index 14919e1539fafa89d62c5bcd3c926ed9a3747fd9,27bda986fc2bd8a6ad948d19e819bb1e8ec415df..9727a16c0409b410ebe251512992ea496c5ba310
@@@ -2999,8 -2999,8 +2999,8 @@@ int si_rlc_init(struct radeon_device *r
        }
        r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->rlc.save_restore_gpu_addr);
+       radeon_bo_unreserve(rdev->rlc.save_restore_obj);
        if (r) {
-               radeon_bo_unreserve(rdev->rlc.save_restore_obj);
                dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
                si_rlc_fini(rdev);
                return r;
        }
        r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->rlc.clear_state_gpu_addr);
+       radeon_bo_unreserve(rdev->rlc.clear_state_obj);
        if (r) {
-               radeon_bo_unreserve(rdev->rlc.clear_state_obj);
                dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
                si_rlc_fini(rdev);
                return r;
@@@ -3217,8 -3216,6 +3216,8 @@@ static int si_irq_init(struct radeon_de
        /* force the active interrupt state to all disabled */
        si_disable_interrupt_state(rdev);
  
 +      pci_set_master(rdev->pdev);
 +
        /* enable irqs */
        si_enable_interrupts(rdev);
  
This page took 0.290816 seconds and 5 git commands to generate.