drm/i915: Rename struct intel_context
[deliverable/linux.git] drivers/gpu/drm/i915/i915_gem_context.c
index 5dd84e148bbac2f5e7066990b23033785ff2bced..8484da26b5d49a4de106ec97daf57f840dc94c8b 100644
@@ -90,6 +90,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 
+#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
+
 /* This is a HW constraint. The value below is the largest known requirement
  * I've seen in a spec to date, and that was a workaround for a non-shipping
  * part. It should be safe to decrease this, but it's more future proof as is.
  */
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static size_t get_context_alignment(struct drm_device *dev)
+static size_t get_context_alignment(struct drm_i915_private *dev_priv)
 {
-       if (IS_GEN6(dev))
+       if (IS_GEN6(dev_priv))
                return GEN6_CONTEXT_ALIGN;
 
        return GEN7_CONTEXT_ALIGN;
 }
 
-static int get_context_size(struct drm_device *dev)
+static int get_context_size(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
        u32 reg;
 
-       switch (INTEL_INFO(dev)->gen) {
+       switch (INTEL_GEN(dev_priv)) {
        case 6:
                reg = I915_READ(CXT_SIZE);
                ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
                break;
        case 7:
                reg = I915_READ(GEN7_CXT_SIZE);
-               if (IS_HASWELL(dev))
+               if (IS_HASWELL(dev_priv))
                        ret = HSW_CXT_TOTAL_SIZE;
                else
                        ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@@ -133,7 +134,7 @@ static int get_context_size(struct drm_device *dev)
        return ret;
 }
 
-static void i915_gem_context_clean(struct intel_context *ctx)
+static void i915_gem_context_clean(struct i915_gem_context *ctx)
 {
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
        struct i915_vma *vma, *next;
@@ -150,7 +151,7 @@ static void i915_gem_context_clean(struct intel_context *ctx)
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
-       struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+       struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
 
        trace_i915_context_free(ctx);
 
@@ -169,6 +170,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
        if (ctx->legacy_hw_ctx.rcs_state)
                drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
        list_del(&ctx->link);
+
+       ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
        kfree(ctx);
 }
 
@@ -178,9 +181,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
        struct drm_i915_gem_object *obj;
        int ret;
 
-       obj = i915_gem_alloc_object(dev, size);
-       if (obj == NULL)
-               return ERR_PTR(-ENOMEM);
+       obj = i915_gem_object_create(dev, size);
+       if (IS_ERR(obj))
+               return obj;
 
        /*
         * Try to make the context utilize L3 as well as LLC.
@@ -209,18 +212,46 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
        return obj;
 }
 
-static struct intel_context *
+static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
+{
+       int ret;
+
+       ret = ida_simple_get(&dev_priv->context_hw_ida,
+                            0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+       if (ret < 0) {
+               /* Contexts are only released when no longer active.
+                * Flush any pending retires to hopefully release some
+                * stale contexts and try again.
+                */
+               i915_gem_retire_requests(dev_priv);
+               ret = ida_simple_get(&dev_priv->context_hw_ida,
+                                    0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+               if (ret < 0)
+                       return ret;
+       }
+
+       *out = ret;
+       return 0;
+}
+
+static struct i915_gem_context *
 __create_hw_context(struct drm_device *dev,
                    struct drm_i915_file_private *file_priv)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        int ret;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx == NULL)
                return ERR_PTR(-ENOMEM);
 
+       ret = assign_hw_id(dev_priv, &ctx->hw_id);
+       if (ret) {
+               kfree(ctx);
+               return ERR_PTR(ret);
+       }
+
        kref_init(&ctx->ref);
        list_add_tail(&ctx->link, &dev_priv->context_list);
        ctx->i915 = dev_priv;
@@ -249,7 +280,7 @@ __create_hw_context(struct drm_device *dev,
        /* NB: Mark all slices as needing a remap so that when the context first
         * loads it will restore whatever remap state already exists. If there
         * is no remap info, it will be a NOP. */
-       ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+       ctx->remap_slice = ALL_L3_SLICES(dev_priv);
 
        ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
 
@@ -265,12 +296,12 @@ err_out:
  * context state of the GPU for applications that don't utilize HW contexts, as
  * well as an idle case.
  */
-static struct intel_context *
+static struct i915_gem_context *
 i915_gem_create_context(struct drm_device *dev,
                        struct drm_i915_file_private *file_priv)
 {
        const bool is_global_default_ctx = file_priv == NULL;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        int ret = 0;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -288,7 +319,7 @@ i915_gem_create_context(struct drm_device *dev,
                 * context.
                 */
                ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
-                                           get_context_alignment(dev), 0);
+                                           get_context_alignment(to_i915(dev)), 0);
                if (ret) {
                        DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
                        goto err_destroy;
@@ -321,7 +352,7 @@ err_destroy:
        return ERR_PTR(ret);
 }
 
-static void i915_gem_context_unpin(struct intel_context *ctx,
+static void i915_gem_context_unpin(struct i915_gem_context *ctx,
                                   struct intel_engine_cs *engine)
 {
        if (i915.enable_execlists) {
@@ -336,51 +367,46 @@ static void i915_gem_context_unpin(struct intel_context *ctx,
 void i915_gem_context_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
 
        if (i915.enable_execlists) {
-               struct intel_context *ctx;
+               struct i915_gem_context *ctx;
 
                list_for_each_entry(ctx, &dev_priv->context_list, link)
-                       intel_lr_context_reset(dev, ctx);
+                       intel_lr_context_reset(dev_priv, ctx);
        }
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
-               struct intel_engine_cs *ring = &dev_priv->ring[i];
-
-               if (ring->last_context) {
-                       i915_gem_context_unpin(ring->last_context, ring);
-                       ring->last_context = NULL;
-               }
-       }
-
-       /* Force the GPU state to be reinitialised on enabling */
-       dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
+       i915_gem_context_lost(dev_priv);
 }
 
 int i915_gem_context_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
 
        /* Init should only be called once per module load. Eventually the
         * restriction on the context_disabled check can be loosened. */
        if (WARN_ON(dev_priv->kernel_context))
                return 0;
 
-       if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+       if (intel_vgpu_active(dev_priv) &&
+           HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
                if (!i915.enable_execlists) {
                        DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
                        return -EINVAL;
                }
        }
 
+       /* Using the simple ida interface, the max is limited by sizeof(int) */
+       BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+       ida_init(&dev_priv->context_hw_ida);
+
        if (i915.enable_execlists) {
                /* NB: intentionally left blank. We will allocate our own
                 * backing objects as we need them, thank you very much */
                dev_priv->hw_context_size = 0;
-       } else if (HAS_HW_CONTEXTS(dev)) {
-               dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+       } else if (HAS_HW_CONTEXTS(dev_priv)) {
+               dev_priv->hw_context_size =
+                       round_up(get_context_size(dev_priv), 4096);
                if (dev_priv->hw_context_size > (1<<20)) {
                        DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
                                         dev_priv->hw_context_size);
@@ -403,66 +429,40 @@ int i915_gem_context_init(struct drm_device *dev)
        return 0;
 }
 
-void i915_gem_context_fini(struct drm_device *dev)
+void i915_gem_context_lost(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_context *dctx = dev_priv->kernel_context;
-       int i;
-
-       if (dctx->legacy_hw_ctx.rcs_state) {
-               /* The only known way to stop the gpu from accessing the hw context is
-                * to reset it. Do this as the very last operation to avoid confusing
-                * other code, leading to spurious errors. */
-               intel_gpu_reset(dev);
-
-               /* When default context is created and switched to, base object refcount
-                * will be 2 (+1 from object creation and +1 from do_switch()).
-                * i915_gem_context_fini() will be called after gpu_idle() has switched
-                * to default context. So we need to unreference the base object once
-                * to offset the do_switch part, so that i915_gem_context_unreference()
-                * can then free the base object correctly. */
-               WARN_ON(!dev_priv->ring[RCS].last_context);
-
-               i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
-       }
+       struct intel_engine_cs *engine;
 
-       for (i = I915_NUM_RINGS; --i >= 0;) {
-               struct intel_engine_cs *ring = &dev_priv->ring[i];
+       for_each_engine(engine, dev_priv) {
+               if (engine->last_context == NULL)
+                       continue;
 
-               if (ring->last_context) {
-                       i915_gem_context_unpin(ring->last_context, ring);
-                       ring->last_context = NULL;
-               }
+               i915_gem_context_unpin(engine->last_context, engine);
+               engine->last_context = NULL;
        }
 
-       i915_gem_context_unreference(dctx);
-       dev_priv->kernel_context = NULL;
+       /* Force the GPU state to be reinitialised on enabling */
+       dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
+       dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
 }
 
-int i915_gem_context_enable(struct drm_i915_gem_request *req)
+void i915_gem_context_fini(struct drm_device *dev)
 {
-       struct intel_engine_cs *ring = req->ring;
-       int ret;
-
-       if (i915.enable_execlists) {
-               if (ring->init_context == NULL)
-                       return 0;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_gem_context *dctx = dev_priv->kernel_context;
 
-               ret = ring->init_context(req);
-       } else
-               ret = i915_switch_context(req);
+       if (dctx->legacy_hw_ctx.rcs_state)
+               i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 
-       if (ret) {
-               DRM_ERROR("ring init context: %d\n", ret);
-               return ret;
-       }
+       i915_gem_context_unreference(dctx);
+       dev_priv->kernel_context = NULL;
 
-       return 0;
+       ida_destroy(&dev_priv->context_hw_ida);
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
 {
-       struct intel_context *ctx = p;
+       struct i915_gem_context *ctx = p;
 
        i915_gem_context_unreference(ctx);
        return 0;
@@ -471,7 +471,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
 
        idr_init(&file_priv->context_idr);
 
@@ -495,12 +495,12 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
        idr_destroy(&file_priv->context_idr);
 }
 
-struct intel_context *
+struct i915_gem_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 {
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
 
-       ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
+       ctx = idr_find(&file_priv->context_idr, id);
        if (!ctx)
                return ERR_PTR(-ENOENT);
 
@@ -510,133 +510,186 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct drm_i915_private *dev_priv = req->i915;
+       struct intel_engine_cs *engine = req->engine;
        u32 flags = hw_flags | MI_MM_SPACE_GTT;
        const int num_rings =
                /* Use an extended w/a on ivb+ if signalling from other rings */
-               i915_semaphore_is_enabled(ring->dev) ?
-               hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+               i915_semaphore_is_enabled(dev_priv) ?
+               hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
                0;
-       int len, i, ret;
+       int len, ret;
 
        /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
         * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
         * explicitly, so we rely on the value at ring init, stored in
         * itlb_before_ctx_switch.
         */
-       if (IS_GEN6(ring->dev)) {
-               ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+       if (IS_GEN6(dev_priv)) {
+               ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
                if (ret)
                        return ret;
        }
 
        /* These flags are for resource streamer on HSW+ */
-       if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+       if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
                flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-       else if (INTEL_INFO(ring->dev)->gen < 8)
+       else if (INTEL_GEN(dev_priv) < 8)
                flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
        len = 4;
-       if (INTEL_INFO(ring->dev)->gen >= 7)
-               len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+       if (INTEL_GEN(dev_priv) >= 7)
+               len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
        ret = intel_ring_begin(req, len);
        if (ret)
                return ret;
 
        /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-       if (INTEL_INFO(ring->dev)->gen >= 7) {
-               intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+       if (INTEL_GEN(dev_priv) >= 7) {
+               intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
                if (num_rings) {
                        struct intel_engine_cs *signaller;
 
-                       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-                       for_each_ring(signaller, to_i915(ring->dev), i) {
-                               if (signaller == ring)
+                       intel_ring_emit(engine,
+                                       MI_LOAD_REGISTER_IMM(num_rings));
+                       for_each_engine(signaller, dev_priv) {
+                               if (signaller == engine)
                                        continue;
 
-                               intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
-                               intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+                               intel_ring_emit_reg(engine,
+                                                   RING_PSMI_CTL(signaller->mmio_base));
+                               intel_ring_emit(engine,
+                                               _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
                        }
                }
        }
 
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_emit(ring, MI_SET_CONTEXT);
-       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_emit(engine, MI_SET_CONTEXT);
+       intel_ring_emit(engine,
+                       i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
                        flags);
        /*
         * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
         * WaMiSetContext_Hang:snb,ivb,vlv
         */
-       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_emit(engine, MI_NOOP);
 
-       if (INTEL_INFO(ring->dev)->gen >= 7) {
+       if (INTEL_GEN(dev_priv) >= 7) {
                if (num_rings) {
                        struct intel_engine_cs *signaller;
+                       i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-                       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-                       for_each_ring(signaller, to_i915(ring->dev), i) {
-                               if (signaller == ring)
+                       intel_ring_emit(engine,
+                                       MI_LOAD_REGISTER_IMM(num_rings));
+                       for_each_engine(signaller, dev_priv) {
+                               if (signaller == engine)
                                        continue;
 
-                               intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
-                               intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+                               last_reg = RING_PSMI_CTL(signaller->mmio_base);
+                               intel_ring_emit_reg(engine, last_reg);
+                               intel_ring_emit(engine,
+                                               _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
                        }
+
+                       /* Insert a delay before the next switch! */
+                       intel_ring_emit(engine,
+                                       MI_STORE_REGISTER_MEM |
+                                       MI_SRM_LRM_GLOBAL_GTT);
+                       intel_ring_emit_reg(engine, last_reg);
+                       intel_ring_emit(engine, engine->scratch.gtt_offset);
+                       intel_ring_emit(engine, MI_NOOP);
                }
-               intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+               intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
        }
 
-       intel_ring_advance(ring);
+       intel_ring_advance(engine);
 
        return ret;
 }
 
-static inline bool should_skip_switch(struct intel_engine_cs *ring,
-                                     struct intel_context *from,
-                                     struct intel_context *to)
+static int remap_l3(struct drm_i915_gem_request *req, int slice)
+{
+       u32 *remap_info = req->i915->l3_parity.remap_info[slice];
+       struct intel_engine_cs *engine = req->engine;
+       int i, ret;
+
+       if (!remap_info)
+               return 0;
+
+       ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+       if (ret)
+               return ret;
+
+       /*
+        * Note: We do not worry about the concurrent register cacheline hang
+        * here because no other code should access these registers other than
+        * at initialization time.
+        */
+       intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+       for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
+               intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+               intel_ring_emit(engine, remap_info[i]);
+       }
+       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_advance(engine);
+
+       return 0;
+}
+
+static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
+                                  struct intel_engine_cs *engine,
+                                  struct i915_gem_context *to)
 {
        if (to->remap_slice)
                return false;
 
-       if (to->ppgtt && from == to &&
-           !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
-               return true;
+       if (!to->legacy_hw_ctx.initialized)
+               return false;
 
-       return false;
+       if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
+               return false;
+
+       return to == engine->last_context;
 }
 
 static bool
-needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
+                 struct intel_engine_cs *engine,
+                 struct i915_gem_context *to)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       if (!ppgtt)
+               return false;
+
+       /* Always load the ppgtt on first use */
+       if (!engine->last_context)
+               return true;
 
-       if (!to->ppgtt)
+       /* Same context without new entries, skip */
+       if (engine->last_context == to &&
+           !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
                return false;
 
-       if (INTEL_INFO(ring->dev)->gen < 8)
+       if (engine->id != RCS)
                return true;
 
-       if (ring != &dev_priv->ring[RCS])
+       if (INTEL_GEN(engine->i915) < 8)
                return true;
 
        return false;
 }
 
 static bool
-needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
-               u32 hw_flags)
+needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
+                  struct i915_gem_context *to,
+                  u32 hw_flags)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-       if (!to->ppgtt)
-               return false;
-
-       if (!IS_GEN8(ring->dev))
+       if (!ppgtt)
                return false;
 
-       if (ring != &dev_priv->ring[RCS])
+       if (!IS_GEN8(to->i915))
                return false;
 
        if (hw_flags & MI_RESTORE_INHIBIT)
@@ -645,58 +698,33 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
        return false;
 }
 
-static int do_switch(struct drm_i915_gem_request *req)
+static int do_rcs_switch(struct drm_i915_gem_request *req)
 {
-       struct intel_context *to = req->ctx;
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct intel_context *from = ring->last_context;
-       u32 hw_flags = 0;
-       bool uninitialized = false;
+       struct i915_gem_context *to = req->ctx;
+       struct intel_engine_cs *engine = req->engine;
+       struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+       struct i915_gem_context *from;
+       u32 hw_flags;
        int ret, i;
 
-       if (from != NULL && ring == &dev_priv->ring[RCS]) {
-               BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
-               BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
-       }
-
-       if (should_skip_switch(ring, from, to))
+       if (skip_rcs_switch(ppgtt, engine, to))
                return 0;
 
        /* Trying to pin first makes error handling easier. */
-       if (ring == &dev_priv->ring[RCS]) {
-               ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-                                           get_context_alignment(ring->dev), 0);
-               if (ret)
-                       return ret;
-       }
+       ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+                                   get_context_alignment(engine->i915),
+                                   0);
+       if (ret)
+               return ret;
 
        /*
         * Pin can switch back to the default context if we end up calling into
         * evict_everything - as a last ditch gtt defrag effort that also
         * switches to the default context. Hence we need to reload from here.
+        *
+        * XXX: Doing so is painfully broken!
         */
-       from = ring->last_context;
-
-       if (needs_pd_load_pre(ring, to)) {
-               /* Older GENs and non render rings still want the load first,
-                * "PP_DCLV followed by PP_DIR_BASE register through Load
-                * Register Immediate commands in Ring Buffer before submitting
-                * a context."*/
-               trace_switch_mm(ring, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, req);
-               if (ret)
-                       goto unpin_out;
-
-               /* Doing a PD load always reloads the page dirs */
-               to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
-       }
-
-       if (ring != &dev_priv->ring[RCS]) {
-               if (from)
-                       i915_gem_context_unreference(from);
-               goto done;
-       }
+       from = engine->last_context;
 
        /*
         * Clear this page out of any CPU caches for coherent swap-in/out. Note
@@ -710,53 +738,32 @@ static int do_switch(struct drm_i915_gem_request *req)
        if (ret)
                goto unpin_out;
 
-       if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
-               hw_flags |= MI_RESTORE_INHIBIT;
+       if (needs_pd_load_pre(ppgtt, engine, to)) {
+               /* Older GENs and non render rings still want the load first,
+                * "PP_DCLV followed by PP_DIR_BASE register through Load
+                * Register Immediate commands in Ring Buffer before submitting
+                * a context."*/
+               trace_switch_mm(engine, to);
+               ret = ppgtt->switch_mm(ppgtt, req);
+               if (ret)
+                       goto unpin_out;
+       }
+
+       if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
                /* NB: If we inhibit the restore, the context is not allowed to
                 * die because future work may end up depending on valid address
                 * space. This means we must enforce that a page table load
                 * occur when this occurs. */
-       } else if (to->ppgtt &&
-                  (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
-               hw_flags |= MI_FORCE_RESTORE;
-               to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
-       }
-
-       /* We should never emit switch_mm more than once */
-       WARN_ON(needs_pd_load_pre(ring, to) &&
-               needs_pd_load_post(ring, to, hw_flags));
-
-       ret = mi_set_context(req, hw_flags);
-       if (ret)
-               goto unpin_out;
-
-       /* GEN8 does *not* require an explicit reload if the PDPs have been
-        * setup, and we do not wish to move them.
-        */
-       if (needs_pd_load_post(ring, to, hw_flags)) {
-               trace_switch_mm(ring, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, req);
-               /* The hardware context switch is emitted, but we haven't
-                * actually changed the state - so it's probably safe to bail
-                * here. Still, let the user know something dangerous has
-                * happened.
-                */
-               if (ret) {
-                       DRM_ERROR("Failed to change address space on context switch\n");
-                       goto unpin_out;
-               }
-       }
-
-       for (i = 0; i < MAX_L3_SLICES; i++) {
-               if (!(to->remap_slice & (1<<i)))
-                       continue;
-
-               ret = i915_gem_l3_remap(req, i);
-               /* If it failed, try again next round */
+               hw_flags = MI_RESTORE_INHIBIT;
+       else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
+               hw_flags = MI_FORCE_RESTORE;
+       else
+               hw_flags = 0;
+
+       if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
+               ret = mi_set_context(req, hw_flags);
                if (ret)
-                       DRM_DEBUG_DRIVER("L3 remapping failed\n");
-               else
-                       to->remap_slice &= ~(1<<i);
+                       goto unpin_out;
        }
 
        /* The backing object for the context is done after switching to the
@@ -781,27 +788,51 @@ static int do_switch(struct drm_i915_gem_request *req)
                i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
                i915_gem_context_unreference(from);
        }
+       i915_gem_context_reference(to);
+       engine->last_context = to;
+
+       /* GEN8 does *not* require an explicit reload if the PDPs have been
+        * setup, and we do not wish to move them.
+        */
+       if (needs_pd_load_post(ppgtt, to, hw_flags)) {
+               trace_switch_mm(engine, to);
+               ret = ppgtt->switch_mm(ppgtt, req);
+               /* The hardware context switch is emitted, but we haven't
+                * actually changed the state - so it's probably safe to bail
+                * here. Still, let the user know something dangerous has
+                * happened.
+                */
+               if (ret)
+                       return ret;
+       }
 
-       uninitialized = !to->legacy_hw_ctx.initialized;
-       to->legacy_hw_ctx.initialized = true;
+       if (ppgtt)
+               ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 
-done:
-       i915_gem_context_reference(to);
-       ring->last_context = to;
+       for (i = 0; i < MAX_L3_SLICES; i++) {
+               if (!(to->remap_slice & (1<<i)))
+                       continue;
 
-       if (uninitialized) {
-               if (ring->init_context) {
-                       ret = ring->init_context(req);
+               ret = remap_l3(req, i);
+               if (ret)
+                       return ret;
+
+               to->remap_slice &= ~(1<<i);
+       }
+
+       if (!to->legacy_hw_ctx.initialized) {
+               if (engine->init_context) {
+                       ret = engine->init_context(req);
                        if (ret)
-                               DRM_ERROR("ring init context: %d\n", ret);
+                               return ret;
                }
+               to->legacy_hw_ctx.initialized = true;
        }
 
        return 0;
 
 unpin_out:
-       if (ring->id == RCS)
-               i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+       i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
        return ret;
 }
 
@@ -820,23 +851,40 @@ unpin_out:
  */
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct intel_engine_cs *engine = req->engine;
+       struct drm_i915_private *dev_priv = req->i915;
 
        WARN_ON(i915.enable_execlists);
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-       if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-               if (req->ctx != ring->last_context) {
-                       i915_gem_context_reference(req->ctx);
-                       if (ring->last_context)
-                               i915_gem_context_unreference(ring->last_context);
-                       ring->last_context = req->ctx;
+       if (engine->id != RCS ||
+           req->ctx->legacy_hw_ctx.rcs_state == NULL) {
+               struct i915_gem_context *to = req->ctx;
+               struct i915_hw_ppgtt *ppgtt =
+                       to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+
+               if (needs_pd_load_pre(ppgtt, engine, to)) {
+                       int ret;
+
+                       trace_switch_mm(engine, to);
+                       ret = ppgtt->switch_mm(ppgtt, req);
+                       if (ret)
+                               return ret;
+
+                       ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+               }
+
+               if (to != engine->last_context) {
+                       i915_gem_context_reference(to);
+                       if (engine->last_context)
+                               i915_gem_context_unreference(engine->last_context);
+                       engine->last_context = to;
                }
+
                return 0;
        }
 
-       return do_switch(req);
+       return do_rcs_switch(req);
 }
 
 static bool contexts_enabled(struct drm_device *dev)
@@ -849,7 +897,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_context_create *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        int ret;
 
        if (!contexts_enabled(dev))
@@ -878,7 +926,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_context_destroy *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        int ret;
 
        if (args->pad != 0)
@@ -910,7 +958,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        int ret;
 
        ret = i915_mutex_lock_interruptible(dev);
@@ -937,7 +985,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                else if (to_i915(dev)->mm.aliasing_ppgtt)
                        args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
                else
-                       args->value = to_i915(dev)->gtt.base.total;
+                       args->value = to_i915(dev)->ggtt.base.total;
                break;
        default:
                ret = -EINVAL;
@@ -953,7 +1001,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        int ret;
 
        ret = i915_mutex_lock_interruptible(dev);
@@ -992,3 +1040,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 
        return ret;
 }
+
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
+                                      void *data, struct drm_file *file)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_reset_stats *args = data;
+       struct i915_ctx_hang_stats *hs;
+       struct i915_gem_context *ctx;
+       int ret;
+
+       if (args->flags || args->pad)
+               return -EINVAL;
+
+       if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
+
+       ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
+       if (IS_ERR(ctx)) {
+               mutex_unlock(&dev->struct_mutex);
+               return PTR_ERR(ctx);
+       }
+       hs = &ctx->hang_stats;
+
+       if (capable(CAP_SYS_ADMIN))
+               args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+       else
+               args->reset_count = 0;
+
+       args->batch_active = hs->batch_active;
+       args->batch_pending = hs->batch_pending;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}