drm/i915/bxt: Mark WaCcsTlbPrefetchDisable as for Broxton also.
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e5b3c6dbd46780798e1607b0f3390db5465ecbd5..9b96ed7de9bbb79284a666c376abb70ba0b91c94 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -317,29 +317,6 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
        return 0;
 }
 
-static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
-{
-       int ret;
-
-       if (!ring->fbc_dirty)
-               return 0;
-
-       ret = intel_ring_begin(ring, 6);
-       if (ret)
-               return ret;
-       /* WaFbcNukeOn3DBlt:ivb/hsw */
-       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-       intel_ring_emit(ring, MSG_FBC_REND_STATE);
-       intel_ring_emit(ring, value);
-       intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
-       intel_ring_emit(ring, MSG_FBC_REND_STATE);
-       intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
-       intel_ring_advance(ring);
-
-       ring->fbc_dirty = false;
-       return 0;
-}
-
 static int
 gen7_render_ring_flush(struct intel_engine_cs *ring,
                       u32 invalidate_domains, u32 flush_domains)
@@ -398,9 +375,6 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
 
-       if (!invalidate_domains && flush_domains)
-               return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
-
        return 0;
 }
 
@@ -458,14 +432,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
                        return ret;
        }
 
-       ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
-       if (ret)
-               return ret;
-
-       if (!invalidate_domains && flush_domains)
-               return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
-
-       return 0;
+       return gen8_emit_pipe_control(ring, flags, scratch_addr);
 }
 
 static void ring_write_tail(struct intel_engine_cs *ring,
@@ -502,6 +469,68 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
        I915_WRITE(HWS_PGA, addr);
 }
 
+static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       u32 mmio = 0;
+
+       /* The ring status page addresses are no longer next to the rest of
+        * the ring registers as of gen7.
+        */
+       if (IS_GEN7(dev)) {
+               switch (ring->id) {
+               case RCS:
+                       mmio = RENDER_HWS_PGA_GEN7;
+                       break;
+               case BCS:
+                       mmio = BLT_HWS_PGA_GEN7;
+                       break;
+               /*
+                * VCS2 actually doesn't exist on Gen7. Only shut up
+                * gcc switch check warning
+                */
+               case VCS2:
+               case VCS:
+                       mmio = BSD_HWS_PGA_GEN7;
+                       break;
+               case VECS:
+                       mmio = VEBOX_HWS_PGA_GEN7;
+                       break;
+               }
+       } else if (IS_GEN6(ring->dev)) {
+               mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+       } else {
+               /* XXX: gen8 returns to sanity */
+               mmio = RING_HWS_PGA(ring->mmio_base);
+       }
+
+       I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+       POSTING_READ(mmio);
+
+       /*
+        * Flush the TLB for this page
+        *
+        * FIXME: These two bits have disappeared on gen8, so a question
+        * arises: do we still need this and if so how should we go about
+        * invalidating the TLB?
+        */
+       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+               u32 reg = RING_INSTPM(ring->mmio_base);
+
+               /* ring should be idle before issuing a sync flush*/
+               WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
+               I915_WRITE(reg,
+                          _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+                                             INSTPM_SYNC_FLUSH));
+               if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+                            1000))
+                       DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+                                 ring->name);
+       }
+}
+
 static bool stop_ring(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = to_i915(ring->dev);
@@ -788,12 +817,14 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
         * workaround for a possible hang in the unlikely event a TLB
         * invalidation occurs during a PSD flush.
         */
-       /* WaForceEnableNonCoherent:bdw */
-       /* WaHdcDisableFetchWhenMasked:bdw */
-       /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         /* WaForceEnableNonCoherent:bdw */
                          HDC_FORCE_NON_COHERENT |
+                         /* WaForceContextSaveRestoreNonCoherent:bdw */
+                         HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+                         /* WaHdcDisableFetchWhenMasked:bdw */
                          HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+                         /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
                          (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 
        /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
@@ -822,6 +853,9 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
                            GEN6_WIZ_HASHING_MASK,
                            GEN6_WIZ_HASHING_16x4);
 
+       /* WaProgramL3SqcReg1Default:bdw */
+       WA_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
+
        return 0;
 }
 
@@ -870,6 +904,166 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
                            GEN6_WIZ_HASHING_MASK,
                            GEN6_WIZ_HASHING_16x4);
 
+       if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+           INTEL_REVID(dev) == SKL_REVID_D0)
+               /* WaBarrierPerformanceFixDisable:skl */
+               WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                                 HDC_FENCE_DEST_SLM_DISABLE |
+                                 HDC_BARRIER_PERFORMANCE_DISABLE);
+
+       return 0;
+}
+
+static int gen9_init_workarounds(struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* WaDisablePartialInstShootdown:skl,bxt */
+       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+                         PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+       /* Syncing dependencies between camera and graphics:skl,bxt */
+       WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+                         GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+
+       if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
+           INTEL_REVID(dev) == SKL_REVID_B0)) ||
+           (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
+               /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
+               WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+                                 GEN9_DG_MIRROR_FIX_ENABLE);
+       }
+
+       if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+           (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
+               /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+               WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
+                                 GEN9_RHWO_OPTIMIZATION_DISABLE);
+               WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
+                                 DISABLE_PIXEL_MASK_CAMMING);
+       }
+
+       if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
+           IS_BROXTON(dev)) {
+               /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
+               WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+                                 GEN9_ENABLE_YV12_BUGFIX);
+       }
+
+       /* Wa4x4STCOptimizationDisable:skl,bxt */
+       WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+       /* WaDisablePartialResolveInVc:skl,bxt */
+       WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+
+       /* WaCcsTlbPrefetchDisable:skl,bxt */
+       WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+                         GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+       /*
+        * FIXME: don't apply the following on BXT for stepping C. On BXT A0
+        * the flag reads back as 0.
+        */
+       /* WaDisableMaskBasedCammingInRCC:sklC,bxtA */
+       if (INTEL_REVID(dev) == SKL_REVID_C0 || IS_BROXTON(dev))
+               WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
+                                 PIXEL_MASK_CAMMING_DISABLE);
+
+       return 0;
+}
+
+static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u8 vals[3] = { 0, 0, 0 };
+       unsigned int i;
+
+       for (i = 0; i < 3; i++) {
+               u8 ss;
+
+               /*
+                * Only consider slices where one, and only one, subslice has 7
+                * EUs
+                */
+               if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
+                       continue;
+
+               /*
+                * subslice_7eu[i] != 0 (because of the check above) and
+                * ss_max == 4 (maximum number of subslices possible per slice)
+                *
+                * ->    0 <= ss <= 3;
+                */
+               ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
+               vals[i] = 3 - ss;
+       }
+
+       if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+               return 0;
+
+       /* Tune IZ hashing. See intel_device_info_runtime_init() */
+       WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+                           GEN9_IZ_HASHING_MASK(2) |
+                           GEN9_IZ_HASHING_MASK(1) |
+                           GEN9_IZ_HASHING_MASK(0),
+                           GEN9_IZ_HASHING(2, vals[2]) |
+                           GEN9_IZ_HASHING(1, vals[1]) |
+                           GEN9_IZ_HASHING(0, vals[0]));
+
+       return 0;
+}
+
+
+static int skl_init_workarounds(struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       gen9_init_workarounds(ring);
+
+       /* WaDisablePowerCompilerClockGating:skl */
+       if (INTEL_REVID(dev) == SKL_REVID_B0)
+               WA_SET_BIT_MASKED(HIZ_CHICKEN,
+                                 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
+
+       if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+               /*
+                *Use Force Non-Coherent whenever executing a 3D context. This
+                * is a workaround for a possible hang in the unlikely event
+                * a TLB invalidation occurs during a PSD flush.
+                */
+               /* WaForceEnableNonCoherent:skl */
+               WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                                 HDC_FORCE_NON_COHERENT);
+       }
+
+       return skl_tune_iz_hashing(ring);
+}
+
+static int bxt_init_workarounds(struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       gen9_init_workarounds(ring);
+
+       /* WaDisableThreadStallDopClockGating:bxt */
+       WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+                         STALL_DOP_GATING_DISABLE);
+
+       /* WaDisableSbeCacheDispatchPortSharing:bxt */
+       if (INTEL_REVID(dev) <= BXT_REVID_B0) {
+               WA_SET_BIT_MASKED(
+                       GEN7_HALF_SLICE_CHICKEN1,
+                       GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+       }
+
+       /* WaForceContextSaveRestoreNonCoherent:bxt */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+
        return 0;
 }
 
@@ -888,6 +1082,12 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
        if (IS_CHERRYVIEW(dev))
                return chv_init_workarounds(ring);
 
+       if (IS_SKYLAKE(dev))
+               return skl_init_workarounds(ring);
+
+       if (IS_BROXTON(dev))
+               return bxt_init_workarounds(ring);
+
        return 0;
 }
 
@@ -1386,68 +1586,6 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
-void intel_ring_setup_status_page(struct intel_engine_cs *ring)
-{
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       u32 mmio = 0;
-
-       /* The ring status page addresses are no longer next to the rest of
-        * the ring registers as of gen7.
-        */
-       if (IS_GEN7(dev)) {
-               switch (ring->id) {
-               case RCS:
-                       mmio = RENDER_HWS_PGA_GEN7;
-                       break;
-               case BCS:
-                       mmio = BLT_HWS_PGA_GEN7;
-                       break;
-               /*
-                * VCS2 actually doesn't exist on Gen7. Only shut up
-                * gcc switch check warning
-                */
-               case VCS2:
-               case VCS:
-                       mmio = BSD_HWS_PGA_GEN7;
-                       break;
-               case VECS:
-                       mmio = VEBOX_HWS_PGA_GEN7;
-                       break;
-               }
-       } else if (IS_GEN6(ring->dev)) {
-               mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
-       } else {
-               /* XXX: gen8 returns to sanity */
-               mmio = RING_HWS_PGA(ring->mmio_base);
-       }
-
-       I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
-       POSTING_READ(mmio);
-
-       /*
-        * Flush the TLB for this page
-        *
-        * FIXME: These two bits have disappeared on gen8, so a question
-        * arises: do we still need this and if so how should we go about
-        * invalidating the TLB?
-        */
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
-               u32 reg = RING_INSTPM(ring->mmio_base);
-
-               /* ring should be idle before issuing a sync flush*/
-               WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-
-               I915_WRITE(reg,
-                          _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
-                                             INSTPM_SYNC_FLUSH));
-               if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
-                            1000))
-                       DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
-                                 ring->name);
-       }
-}
-
 static int
 bsd_ring_flush(struct intel_engine_cs *ring,
               u32     invalidate_domains,
@@ -1611,7 +1749,7 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
 static int
 i965_dispatch_execbuffer(struct intel_engine_cs *ring,
                         u64 offset, u32 length,
-                        unsigned flags)
+                        unsigned dispatch_flags)
 {
        int ret;
 
@@ -1622,7 +1760,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        MI_BATCH_GTT |
-                       (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+                       (dispatch_flags & I915_DISPATCH_SECURE ?
+                        0 : MI_BATCH_NON_SECURE_I965));
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);
 
@@ -1635,8 +1774,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
-                               u64 offset, u32 len,
-                               unsigned flags)
+                        u64 offset, u32 len,
+                        unsigned dispatch_flags)
 {
        u32 cs_offset = ring->scratch.gtt_offset;
        int ret;
@@ -1654,7 +1793,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
 
-       if ((flags & I915_DISPATCH_PINNED) == 0) {
+       if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;
 
@@ -1686,7 +1825,8 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
                return ret;
 
        intel_ring_emit(ring, MI_BATCH_BUFFER);
-       intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+       intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+                                       0 : MI_BATCH_NON_SECURE));
        intel_ring_emit(ring, offset + len - 8);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
@@ -1697,7 +1837,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 static int
 i915_dispatch_execbuffer(struct intel_engine_cs *ring,
                         u64 offset, u32 len,
-                        unsigned flags)
+                        unsigned dispatch_flags)
 {
        int ret;
 
@@ -1706,7 +1846,8 @@ i915_dispatch_execbuffer(struct intel_engine_cs *ring,
                return ret;
 
        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-       intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+       intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+                                       0 : MI_BATCH_NON_SECURE));
        intel_ring_advance(ring);
 
        return 0;
@@ -1872,6 +2013,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->execlist_queue);
+       i915_gem_batch_pool_init(dev, &ring->batch_pool);
        ringbuf->size = 32 * PAGE_SIZE;
        ringbuf->ring = ring;
        memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
@@ -1950,28 +2092,29 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
        cleanup_status_page(ring);
 
        i915_cmd_parser_fini_ring(ring);
+       i915_gem_batch_pool_fini(&ring->batch_pool);
 
        kfree(ringbuf);
        ring->buffer = NULL;
 }
 
-static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
+static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 {
        struct intel_ringbuffer *ringbuf = ring->buffer;
        struct drm_i915_gem_request *request;
-       int ret;
+       int ret, new_space;
 
        if (intel_ring_space(ringbuf) >= n)
                return 0;
 
        list_for_each_entry(request, &ring->request_list, list) {
-               if (__intel_ring_space(request->postfix, ringbuf->tail,
-                                      ringbuf->size) >= n) {
+               new_space = __intel_ring_space(request->postfix, ringbuf->tail,
+                                      ringbuf->size);
+               if (new_space >= n)
                        break;
-               }
        }
 
-       if (&request->list == &ring->request_list)
+       if (WARN_ON(&request->list == &ring->request_list))
                return -ENOSPC;
 
        ret = i915_wait_request(request);
@@ -1980,59 +2123,9 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 
        i915_gem_retire_requests_ring(ring);
 
-       return 0;
-}
-
-static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
-{
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ringbuffer *ringbuf = ring->buffer;
-       unsigned long end;
-       int ret;
-
-       ret = intel_ring_wait_request(ring, n);
-       if (ret != -ENOSPC)
-               return ret;
-
-       /* force the tail write in case we have been skipping them */
-       __intel_ring_advance(ring);
-
-       /* With GEM the hangcheck timer should kick us out of the loop,
-        * leaving it early runs the risk of corrupting GEM state (due
-        * to running on almost untested codepaths). But on resume
-        * timers don't work yet, so prevent a complete hang in that
-        * case by choosing an insanely large timeout. */
-       end = jiffies + 60 * HZ;
-
-       ret = 0;
-       trace_i915_ring_wait_begin(ring);
-       do {
-               if (intel_ring_space(ringbuf) >= n)
-                       break;
-               ringbuf->head = I915_READ_HEAD(ring);
-               if (intel_ring_space(ringbuf) >= n)
-                       break;
-
-               msleep(1);
+       WARN_ON(intel_ring_space(ringbuf) < new_space);
 
-               if (dev_priv->mm.interruptible && signal_pending(current)) {
-                       ret = -ERESTARTSYS;
-                       break;
-               }
-
-               ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-                                          dev_priv->mm.interruptible);
-               if (ret)
-                       break;
-
-               if (time_after(jiffies, end)) {
-                       ret = -EBUSY;
-                       break;
-               }
-       } while (1);
-       trace_i915_ring_wait_end(ring);
-       return ret;
+       return 0;
 }
 
 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
@@ -2081,31 +2174,9 @@ int intel_ring_idle(struct intel_engine_cs *ring)
        return i915_wait_request(req);
 }
 
-static int
-intel_ring_alloc_request(struct intel_engine_cs *ring)
+int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-       int ret;
-       struct drm_i915_gem_request *request;
-       struct drm_i915_private *dev_private = ring->dev->dev_private;
-
-       if (ring->outstanding_lazy_request)
-               return 0;
-
-       request = kzalloc(sizeof(*request), GFP_KERNEL);
-       if (request == NULL)
-               return -ENOMEM;
-
-       kref_init(&request->ref);
-       request->ring = ring;
-       request->uniq = dev_private->request_uniq++;
-
-       ret = i915_gem_get_seqno(ring->dev, &request->seqno);
-       if (ret) {
-               kfree(request);
-               return ret;
-       }
-
-       ring->outstanding_lazy_request = request;
+       request->ringbuf = request->ring->buffer;
        return 0;
 }
 
@@ -2146,7 +2217,7 @@ int intel_ring_begin(struct intel_engine_cs *ring,
                return ret;
 
        /* Preallocate the olr before touching the ring */
-       ret = intel_ring_alloc_request(ring);
+       ret = i915_gem_request_alloc(ring, ring->default_context);
        if (ret)
                return ret;
 
@@ -2273,9 +2344,10 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
 static int
 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
                              u64 offset, u32 len,
-                             unsigned flags)
+                             unsigned dispatch_flags)
 {
-       bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
+       bool ppgtt = USES_PPGTT(ring->dev) &&
+                       !(dispatch_flags & I915_DISPATCH_SECURE);
        int ret;
 
        ret = intel_ring_begin(ring, 4);
@@ -2294,8 +2366,8 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 static int
 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
-                             u64 offset, u32 len,
-                             unsigned flags)
+                            u64 offset, u32 len,
+                            unsigned dispatch_flags)
 {
        int ret;
 
@@ -2305,7 +2377,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
-                       (flags & I915_DISPATCH_SECURE ?
+                       (dispatch_flags & I915_DISPATCH_SECURE ?
                         0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
@@ -2317,7 +2389,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 static int
 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
                              u64 offset, u32 len,
-                             unsigned flags)
+                             unsigned dispatch_flags)
 {
        int ret;
 
@@ -2327,7 +2399,8 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
-                       (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+                       (dispatch_flags & I915_DISPATCH_SECURE ?
+                        0 : MI_BATCH_NON_SECURE_I965));
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);
@@ -2341,7 +2414,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
                           u32 invalidate, u32 flush)
 {
        struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t cmd;
        int ret;
 
@@ -2350,7 +2422,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (INTEL_INFO(ring->dev)->gen >= 8)
+       if (INTEL_INFO(dev)->gen >= 8)
                cmd += 1;
 
        /* We always require a command barrier so that subsequent
@@ -2370,7 +2442,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
                cmd |= MI_INVALIDATE_TLB;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-       if (INTEL_INFO(ring->dev)->gen >= 8) {
+       if (INTEL_INFO(dev)->gen >= 8) {
                intel_ring_emit(ring, 0); /* upper addr */
                intel_ring_emit(ring, 0); /* value */
        } else  {
@@ -2379,13 +2451,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
        }
        intel_ring_advance(ring);
 
-       if (!invalidate && flush) {
-               if (IS_GEN7(dev))
-                       return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
-               else if (IS_BROADWELL(dev))
-                       dev_priv->fbc.need_sw_cache_clean = true;
-       }
-
        return 0;
 }
 
@@ -2612,19 +2677,13 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 }
 
 /**
- * Initialize the second BSD ring for Broadwell GT3.
- * It is noted that this only exists on Broadwell GT3.
+ * Initialize the second BSD ring (eg. Broadwell GT3, Skylake GT3)
  */
 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
 
-       if ((INTEL_INFO(dev)->gen != 8)) {
-               DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
-               return -EINVAL;
-       }
-
        ring->name = "bsd2 ring";
        ring->id = VCS2;
 