drm/i915: Fix HSW parity test
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d6426353ed97bb61b1113369a7678a..686e5b23481def673fcf6d3c650cf18ceedc4b75 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
-       struct drm_i915_gem_object *obj;
-       volatile u32 *cpu_page;
-       u32 gtt_offset;
-};
-
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
        int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -51,6 +41,16 @@ static inline int ring_space(struct intel_ring_buffer *ring)
        return space;
 }
 
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+       ring->tail &= ring->size - 1;
+       if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+               return;
+       ring->write_tail(ring, ring->tail);
+}
+
 static int
 gen2_render_ring_flush(struct intel_ring_buffer *ring,
                       u32      invalidate_domains,
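
The new helper above takes over what intel_ring_advance() used to do (the old copy is deleted further down in this patch): the software tail is wrapped to the power-of-two ring size, and the MMIO tail write is skipped while the ring is flagged in gpu_error.stop_rings, which the hang-testing infrastructure uses to freeze a ring. A minimal, standalone sketch of that pattern follows; ring_advance(), fake_mmio_write_tail() and the stop mask are illustrative stand-ins, not driver API.

#include <stdio.h>

#define RING_SIZE 4096u                  /* bytes; must be a power of two */
#define RING_FLAG(id) (1u << (id))

struct ring {
	unsigned int id;
	unsigned int tail;               /* software copy of the tail pointer */
};

static unsigned int stop_rings;          /* models gpu_error.stop_rings */
static unsigned int hw_tail;             /* models the ring's TAIL register */

static void fake_mmio_write_tail(const struct ring *ring)
{
	hw_tail = ring->tail;            /* stand-in for ring->write_tail() */
}

/* Wrap the software tail, but only tell the "hardware" when not stopped. */
static void ring_advance(struct ring *ring)
{
	ring->tail &= RING_SIZE - 1;
	if (stop_rings & RING_FLAG(ring->id))
		return;
	fake_mmio_write_tail(ring);
}

int main(void)
{
	struct ring r = { .id = 0, .tail = RING_SIZE + 32 };

	ring_advance(&r);                /* tail wraps to 32, hardware follows */
	printf("tail=%u hw_tail=%u\n", r.tail, hw_tail);

	stop_rings |= RING_FLAG(r.id);   /* simulate a ring stopped for testing */
	r.tail += 64;
	ring_advance(&r);                /* software tail moves, hardware does not */
	printf("tail=%u hw_tail=%u\n", r.tail, hw_tail);
	return 0;
}
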
@@ -175,8 +175,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
 static int
 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
 {
-       struct pipe_control *pc = ring->private;
-       u32 scratch_addr = pc->gtt_offset + 128;
+       u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;
 
 
@@ -213,8 +212,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate_domains, u32 flush_domains)
 {
        u32 flags = 0;
-       struct pipe_control *pc = ring->private;
-       u32 scratch_addr = pc->gtt_offset + 128;
+       u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;
 
        /* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +304,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
 {
        u32 flags = 0;
-       struct pipe_control *pc = ring->private;
-       u32 scratch_addr = pc->gtt_offset + 128;
+       u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;
 
        /*
@@ -440,14 +437,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
-       I915_WRITE_START(ring, obj->gtt_offset);
+       I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
        I915_WRITE_CTL(ring,
                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
        if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-                    I915_READ_START(ring) == obj->gtt_offset &&
+                    I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
                     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
@@ -481,68 +478,43 @@ out:
 static int
 init_pipe_control(struct intel_ring_buffer *ring)
 {
-       struct pipe_control *pc;
-       struct drm_i915_gem_object *obj;
        int ret;
 
-       if (ring->private)
+       if (ring->scratch.obj)
                return 0;
 
-       pc = kmalloc(sizeof(*pc), GFP_KERNEL);
-       if (!pc)
-               return -ENOMEM;
-
-       obj = i915_gem_alloc_object(ring->dev, 4096);
-       if (obj == NULL) {
+       ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+       if (ring->scratch.obj == NULL) {
                DRM_ERROR("Failed to allocate seqno page\n");
                ret = -ENOMEM;
                goto err;
        }
 
-       i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+       i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
 
-       ret = i915_gem_object_pin(obj, 4096, true, false);
+       ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
        if (ret)
                goto err_unref;
 
-       pc->gtt_offset = obj->gtt_offset;
-       pc->cpu_page = kmap(sg_page(obj->pages->sgl));
-       if (pc->cpu_page == NULL) {
+       ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+       ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+       if (ring->scratch.cpu_page == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }
 
        DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-                        ring->name, pc->gtt_offset);
-
-       pc->obj = obj;
-       ring->private = pc;
+                        ring->name, ring->scratch.gtt_offset);
        return 0;
 
 err_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_unpin(ring->scratch.obj);
 err_unref:
-       drm_gem_object_unreference(&obj->base);
+       drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
-       kfree(pc);
        return ret;
 }
 
-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
-       struct pipe_control *pc = ring->private;
-       struct drm_i915_gem_object *obj;
-
-       obj = pc->obj;
-
-       kunmap(sg_page(obj->pages->sgl));
-       i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(&obj->base);
-
-       kfree(pc);
-}
-
 static int init_render_ring(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
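
The init_pipe_control() rework above, together with the render_ring_cleanup() hunk below, folds the separately kmalloc'ed struct pipe_control (and the i830 scratch bo that used to hide behind ring->private) into an embedded ring->scratch, so scratch.obj == NULL doubles as the "not yet initialised" test and a single unreference path serves both users. A small standalone sketch of that refactor, with invented names (struct engine, scratch_init(), scratch_fini()) used purely for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Before: the scratch state lived behind an opaque, separately allocated
 * private pointer.  After: it is embedded in the engine, so its lifetime
 * follows the engine and a NULL buffer pointer doubles as the
 * "not yet initialised" test. */
struct scratch_state {
	void *obj;                       /* stand-in for the pinned GEM object */
	unsigned int gtt_offset;
	void *cpu_page;
};

struct engine {
	const char *name;
	struct scratch_state scratch;    /* embedded: no extra kmalloc/kfree */
};

static int scratch_init(struct engine *e)
{
	if (e->scratch.obj)              /* already set up */
		return 0;

	e->scratch.obj = calloc(1, 4096);       /* stand-in for alloc + pin */
	if (!e->scratch.obj)
		return -ENOMEM;

	e->scratch.gtt_offset = 0x1000;  /* pretend GGTT offset after pinning */
	e->scratch.cpu_page = e->scratch.obj;   /* pretend kmap of the page */
	printf("%s scratch at 0x%x\n", e->name, e->scratch.gtt_offset);
	return 0;
}

static void scratch_fini(struct engine *e)
{
	if (!e->scratch.obj)             /* nothing to tear down */
		return;
	free(e->scratch.obj);
	e->scratch.obj = NULL;
}

int main(void)
{
	struct engine e = { .name = "render" };

	scratch_init(&e);
	scratch_init(&e);                /* second call is a no-op */
	scratch_fini(&e);
	return 0;
}
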
@@ -607,16 +579,16 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
 
-       if (!ring->private)
+       if (ring->scratch.obj == NULL)
                return;
 
-       if (HAS_BROKEN_CS_TLB(dev))
-               drm_gem_object_unreference(to_gem_object(ring->private));
-
-       if (INTEL_INFO(dev)->gen >= 5)
-               cleanup_pipe_control(ring);
+       if (INTEL_INFO(dev)->gen >= 5) {
+               kunmap(sg_page(ring->scratch.obj->pages->sgl));
+               i915_gem_object_unpin(ring->scratch.obj);
+       }
 
-       ring->private = NULL;
+       drm_gem_object_unreference(&ring->scratch.obj->base);
+       ring->scratch.obj = NULL;
 }
 
 static void
@@ -631,7 +603,7 @@ update_mboxes(struct intel_ring_buffer *ring,
 #define MBOX_UPDATE_DWORDS 4
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, mmio_offset);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_NOOP);
 }
 
@@ -667,9 +639,9 @@ gen6_add_request(struct intel_ring_buffer *ring)
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
-       intel_ring_advance(ring);
+       __intel_ring_advance(ring);
 
        return 0;
 }
@@ -742,8 +714,7 @@ do {                                                                        \
 static int
 pc_render_add_request(struct intel_ring_buffer *ring)
 {
-       struct pipe_control *pc = ring->private;
-       u32 scratch_addr = pc->gtt_offset + 128;
+       u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;
 
        /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,8 +732,8 @@ pc_render_add_request(struct intel_ring_buffer *ring)
        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-       intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128; /* write to separate cachelines */
@@ -780,10 +751,10 @@ pc_render_add_request(struct intel_ring_buffer *ring)
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
-       intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, 0);
-       intel_ring_advance(ring);
+       __intel_ring_advance(ring);
 
        return 0;
 }
@@ -814,15 +785,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
 static u32
 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
-       struct pipe_control *pc = ring->private;
-       return pc->cpu_page[0];
+       return ring->scratch.cpu_page[0];
 }
 
 static void
 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
-       struct pipe_control *pc = ring->private;
-       pc->cpu_page[0] = seqno;
+       ring->scratch.cpu_page[0] = seqno;
 }
 
 static bool
@@ -836,11 +805,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
-               dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-               POSTING_READ(GTIMR);
-       }
+       if (ring->irq_refcount++ == 0)
+               ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        return true;
@@ -854,11 +820,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
-               dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-               POSTING_READ(GTIMR);
-       }
+       if (--ring->irq_refcount == 0)
+               ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
@@ -873,7 +836,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
@@ -891,7 +854,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
@@ -910,7 +873,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
@@ -928,7 +891,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
@@ -968,6 +931,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 
        I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
        POSTING_READ(mmio);
+
+       /* Flush the TLB for this page */
+       if (INTEL_INFO(dev)->gen >= 6) {
+               u32 reg = RING_INSTPM(ring->mmio_base);
+               I915_WRITE(reg,
+                          _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+                                             INSTPM_SYNC_FLUSH));
+               if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+                            1000))
+                       DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+                                 ring->name);
+       }
 }
 
 static int
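
The added block flushes the TLB after (re)writing the hardware status page address on gen6+: INSTPM is written with the driver's masked-register convention (new value in the low 16 bits, write-enable mask in the high 16 bits, which is what _MASKED_BIT_ENABLE() builds), and the code then polls until the SYNC_FLUSH bit reads back as zero. A self-contained sketch of that write-then-poll pattern; the register model and bit positions below are illustrative, not the real hardware layout.

#include <stdio.h>

/* Masked registers take the new value in the low 16 bits and a write-enable
 * mask in the high 16 bits, so single bits can be set without a read first. */
#define MASKED_BIT_ENABLE(b)  (((b) << 16) | (b))

#define TLB_INVALIDATE (1u << 9)         /* illustrative bit positions only */
#define SYNC_FLUSH     (1u << 5)

static unsigned int instpm;              /* models the INSTPM register */

static void reg_write_masked(unsigned int val)
{
	unsigned int mask = val >> 16;

	instpm = (instpm & ~mask) | (val & mask);
}

/* The "hardware" clears SYNC_FLUSH once the flush has completed; here it
 * simply completes after a few polls. */
static unsigned int reg_read(int polls_left)
{
	if (polls_left <= 0)
		instpm &= ~SYNC_FLUSH;
	return instpm;
}

int main(void)
{
	int timeout = 3;

	reg_write_masked(MASKED_BIT_ENABLE(TLB_INVALIDATE | SYNC_FLUSH));

	while (reg_read(timeout) & SYNC_FLUSH) {
		if (timeout-- == 0) {
			fprintf(stderr, "wait for sync flush timed out\n");
			return 1;
		}
	}
	printf("TLB invalidation complete, INSTPM=0x%08x\n", instpm);
	return 0;
}
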
@@ -998,9 +973,9 @@ i9xx_add_request(struct intel_ring_buffer *ring)
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
-       intel_ring_advance(ring);
+       __intel_ring_advance(ring);
 
        return 0;
 }
@@ -1021,16 +996,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
        gen6_gt_force_wake_get(dev_priv);
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~(ring->irq_enable_mask |
                                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
                else
                        I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-               dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-               POSTING_READ(GTIMR);
+               ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1045,15 +1018,13 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
                else
                        I915_WRITE_IMR(ring, ~0);
-               dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-               POSTING_READ(GTIMR);
+               ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
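
The interrupt hunks above and below collapse the per-ring reference count from the split irq_refcount.gt / irq_refcount.pm fields into a single irq_refcount, and route the actual mask updates through helpers (ilk_enable_gt_irq()/ilk_disable_gt_irq() here, snb_enable_pm_irq()/snb_disable_pm_irq() for the VEBOX further down), all serialised by dev_priv->irq_lock; the hardware mask is only touched on the 0 -> 1 and 1 -> 0 transitions. A userspace sketch of that refcounted enable/disable pattern, with a pthread mutex standing in for the spinlock and printf for the MMIO write:

#include <pthread.h>
#include <stdio.h>

/* Refcounted interrupt enable: the (simulated) mask register is only
 * touched on the 0 -> 1 and 1 -> 0 transitions, under a single lock. */
static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int irq_refcount;
static unsigned int irq_mask = ~0u;      /* all interrupt sources masked */

static void hw_write_mask(unsigned int mask)
{
	printf("IMR <- 0x%08x\n", mask); /* stand-in for the MMIO write */
}

static void ring_get_irq(unsigned int enable_bit)
{
	pthread_mutex_lock(&irq_lock);
	if (irq_refcount++ == 0) {
		irq_mask &= ~enable_bit;
		hw_write_mask(irq_mask);
	}
	pthread_mutex_unlock(&irq_lock);
}

static void ring_put_irq(unsigned int enable_bit)
{
	pthread_mutex_lock(&irq_lock);
	if (--irq_refcount == 0) {
		irq_mask |= enable_bit;
		hw_write_mask(irq_mask);
	}
	pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
	const unsigned int USER_INTERRUPT = 1u << 12;   /* arbitrary bit */

	ring_get_irq(USER_INTERRUPT);    /* 0 -> 1: unmasks in "hardware"  */
	ring_get_irq(USER_INTERRUPT);    /* nested get: no register write  */
	ring_put_irq(USER_INTERRUPT);    /* still referenced: no write     */
	ring_put_irq(USER_INTERRUPT);    /* 1 -> 0: masks the source again */
	return 0;
}
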
@@ -1070,14 +1041,12 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
        if (!dev->irq_enabled)
                return false;
 
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
-       if (ring->irq_refcount.pm++ == 0) {
-               u32 pm_imr = I915_READ(GEN6_PMIMR);
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (ring->irq_refcount++ == 0) {
                I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-               I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
-               POSTING_READ(GEN6_PMIMR);
+               snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
        }
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        return true;
 }
@@ -1092,14 +1061,12 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
        if (!dev->irq_enabled)
                return;
 
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
-       if (--ring->irq_refcount.pm == 0) {
-               u32 pm_imr = I915_READ(GEN6_PMIMR);
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (--ring->irq_refcount == 0) {
                I915_WRITE_IMR(ring, ~0);
-               I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
-               POSTING_READ(GEN6_PMIMR);
+               snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
        }
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static int
@@ -1143,8 +1110,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                intel_ring_emit(ring, MI_NOOP);
                intel_ring_advance(ring);
        } else {
-               struct drm_i915_gem_object *obj = ring->private;
-               u32 cs_offset = obj->gtt_offset;
+               u32 cs_offset = ring->scratch.gtt_offset;
 
                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;
@@ -1224,12 +1190,12 @@ static int init_status_page(struct intel_ring_buffer *ring)
 
        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-       ret = i915_gem_object_pin(obj, 4096, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
        if (ret != 0) {
                goto err_unref;
        }
 
-       ring->status_page.gfx_addr = obj->gtt_offset;
+       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
        ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
        if (ring->status_page.page_addr == NULL) {
                ret = -ENOMEM;
@@ -1307,7 +1273,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
        ring->obj = obj;
 
-       ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
        if (ret)
                goto err_unref;
 
@@ -1316,7 +1282,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                goto err_unpin;
 
        ring->virtual_start =
-               ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+               ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                           ring->size);
        if (ring->virtual_start == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1458,6 +1424,9 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
        if (ret != -ENOSPC)
                return ret;
 
+       /* force the tail write in case we have been skipping them */
+       __intel_ring_advance(ring);
+
        trace_i915_ring_wait_begin(ring);
        /* With GEM the hangcheck timer should kick us out of the loop,
         * leaving it early runs the risk of corrupting GEM state (due
@@ -1519,7 +1488,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
        int ret;
 
        /* We need to add any requests required to flush the objects and ring */
-       if (ring->outstanding_lazy_request) {
+       if (ring->outstanding_lazy_seqno) {
                ret = i915_add_request(ring, NULL);
                if (ret)
                        return ret;
@@ -1539,10 +1508,20 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
 static int
 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 {
-       if (ring->outstanding_lazy_request)
+       if (ring->outstanding_lazy_seqno)
                return 0;
 
-       return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+       if (ring->preallocated_lazy_request == NULL) {
+               struct drm_i915_gem_request *request;
+
+               request = kmalloc(sizeof(*request), GFP_KERNEL);
+               if (request == NULL)
+                       return -ENOMEM;
+
+               ring->preallocated_lazy_request = request;
+       }
+
+       return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
 static int __intel_ring_begin(struct intel_ring_buffer *ring,
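
intel_ring_alloc_seqno() now also preallocates the request structure that will eventually describe this work, so the step that emits the request after commands are already in the ring cannot fail on a kmalloc. A standalone sketch of that preallocate-before-commit pattern; ring_commit_request() below is an invented stand-in for the consuming side, which in the driver is handled by i915_add_request():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Preallocate everything that can fail before the point of no return,
 * so committing the work later never fails on memory allocation. */
struct request {
	unsigned int seqno;
};

struct ring {
	unsigned int next_seqno;
	unsigned int outstanding_lazy_seqno;
	struct request *preallocated_lazy_request;
};

static int ring_alloc_seqno(struct ring *ring)
{
	if (ring->outstanding_lazy_seqno)        /* already reserved */
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct request *rq = malloc(sizeof(*rq));

		if (rq == NULL)
			return -ENOMEM;
		ring->preallocated_lazy_request = rq;
	}

	ring->outstanding_lazy_seqno = ++ring->next_seqno;
	return 0;
}

/* Later, at a point that must not fail, the preallocated request is
 * consumed instead of allocating a fresh one. */
static struct request *ring_commit_request(struct ring *ring)
{
	struct request *rq = ring->preallocated_lazy_request;

	rq->seqno = ring->outstanding_lazy_seqno;
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;
	return rq;
}

int main(void)
{
	struct ring ring = { 0 };
	struct request *rq;

	if (ring_alloc_seqno(&ring))
		return 1;

	rq = ring_commit_request(&ring);
	printf("committed request with seqno %u\n", rq->seqno);
	free(rq);
	return 0;
}
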
@@ -1589,28 +1568,19 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
-       BUG_ON(ring->outstanding_lazy_request);
+       BUG_ON(ring->outstanding_lazy_seqno);
 
        if (INTEL_INFO(ring->dev)->gen >= 6) {
                I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
                I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+               if (HAS_VEBOX(ring->dev))
+                       I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
        }
 
        ring->set_seqno(ring, seqno);
        ring->hangcheck.seqno = seqno;
 }
 
-void intel_ring_advance(struct intel_ring_buffer *ring)
-{
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-       ring->tail &= ring->size - 1;
-       if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
-               return;
-       ring->write_tail(ring, ring->tail);
-}
-
-
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                     u32 value)
 {
@@ -1828,14 +1798,15 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                        return -ENOMEM;
                }
 
-               ret = i915_gem_object_pin(obj, 0, true, false);
+               ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
                if (ret != 0) {
                        drm_gem_object_unreference(&obj->base);
                        DRM_ERROR("Failed to ping batch bo\n");
                        return ret;
                }
 
-               ring->private = obj;
+               ring->scratch.obj = obj;
+               ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
        }
 
        return intel_init_ring_buffer(dev, ring);
@@ -2008,8 +1979,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
        ring->add_request = gen6_add_request;
        ring->get_seqno = gen6_ring_get_seqno;
        ring->set_seqno = ring_set_seqno;
-       ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
-               PM_VEBOX_CS_ERROR_INTERRUPT;
+       ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
        ring->irq_get = hsw_vebox_get_irq;
        ring->irq_put = hsw_vebox_put_irq;
        ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;