drm/i915: Hook up pfit for DSI
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0d6dc5ec4a46332259a79f8ae5ab658eb5808241..1562a75ac9d16578224682404a501c6398e48a31 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -229,9 +229,6 @@ enum {
 
 static int intel_lr_context_pin(struct intel_context *ctx,
                                struct intel_engine_cs *engine);
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
-                                          struct drm_i915_gem_object *default_ctx_obj);
-
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -418,6 +415,7 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
                                      struct drm_i915_gem_request *rq1)
 {
        struct drm_i915_private *dev_priv = rq0->i915;
+       unsigned int fw_domains = rq0->engine->fw_domains;
 
        execlists_update_context(rq0);
 
@@ -425,11 +423,11 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
                execlists_update_context(rq1);
 
        spin_lock_irq(&dev_priv->uncore.lock);
-       intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
 
        execlists_elsp_write(rq0, rq1);
 
-       intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
        spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
@@ -552,7 +550,7 @@ static void intel_lrc_irq_handler(unsigned long data)
        unsigned int csb_read = 0, i;
        unsigned int submit_contexts = 0;
 
-       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
 
        status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
 
@@ -577,7 +575,7 @@ static void intel_lrc_irq_handler(unsigned long data)
                      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
                                    engine->next_context_status_buffer << 8));
 
-       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
 
        spin_lock(&engine->execlist_lock);
 
@@ -892,16 +890,9 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
  */
 int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-       struct drm_i915_private *dev_priv;
        int ret;
 
        WARN_ON(req == NULL);
-       dev_priv = req->i915;
-
-       ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-                                  dev_priv->mm.interruptible);
-       if (ret)
-               return ret;
 
        ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
        if (ret)
@@ -1016,7 +1007,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
        trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
        i915_gem_execbuffer_move_to_active(vmas, params->request);
-       i915_gem_execbuffer_retire_commands(params);
 
        return 0;
 }
@@ -1057,7 +1047,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
                return;
 
        ret = intel_engine_idle(engine);
-       if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
+       if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          engine->name, ret);
 
@@ -1093,8 +1083,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
        struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
-       struct page *lrc_state_page;
-       uint32_t *lrc_reg_state;
+       void *vaddr;
+       u32 *lrc_reg_state;
        int ret;
 
        WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
@@ -1104,19 +1094,20 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
        if (ret)
                return ret;
 
-       lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-       if (WARN_ON(!lrc_state_page)) {
-               ret = -ENODEV;
+       vaddr = i915_gem_object_pin_map(ctx_obj);
+       if (IS_ERR(vaddr)) {
+               ret = PTR_ERR(vaddr);
                goto unpin_ctx_obj;
        }
 
+       lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+
        ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
        if (ret)
-               goto unpin_ctx_obj;
+               goto unpin_map;
 
        ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
        intel_lr_context_descriptor_update(ctx, engine);
-       lrc_reg_state = kmap(lrc_state_page);
        lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
        ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
        ctx_obj->dirty = true;
@@ -1127,6 +1118,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
 
        return ret;
 
+unpin_map:
+       i915_gem_object_unpin_map(ctx_obj);
 unpin_ctx_obj:
        i915_gem_object_ggtt_unpin(ctx_obj);
 
@@ -1159,7 +1152,7 @@ void intel_lr_context_unpin(struct intel_context *ctx,
 
        WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
        if (--ctx->engine[engine->id].pin_count == 0) {
-               kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+               i915_gem_object_unpin_map(ctx_obj);
                intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
                i915_gem_object_ggtt_unpin(ctx_obj);
                ctx->engine[engine->id].lrc_vma = NULL;
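The two hunks above replace per-page kmap()/kunmap() with a single object-level mapping whose failure comes back encoded in the returned pointer, unwound through the usual goto ladder. A self-contained userspace sketch of both idioms, with every helper name invented:

#include <errno.h>
#include <stdlib.h>

/* Userspace re-creation of the kernel's ERR_PTR convention: small
 * negative errno values ride in the top 4095 bytes of address space. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical pin/map helpers standing in for the GEM calls. */
static void *obj_map(int fail) { return fail ? ERR_PTR(-ENOMEM) : malloc(64); }
static void obj_unmap(void *vaddr) { free(vaddr); }
static int pin_ggtt(void) { return 0; }
static void unpin_ggtt(void) { }
static int pin_ringbuffer(int fail) { return fail ? -EBUSY : 0; }

static int do_pin(int fail_map, int fail_ring)
{
        void *vaddr;
        int ret = pin_ggtt();

        if (ret)
                return ret;

        vaddr = obj_map(fail_map);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);   /* decode the errno from the pointer */
                goto unpin_ctx_obj;
        }

        ret = pin_ringbuffer(fail_ring);
        if (ret)
                goto unpin_map;         /* unwind newest resource first */

        return 0;

unpin_map:
        obj_unmap(vaddr);
unpin_ctx_obj:
        unpin_ggtt();
        return ret;
}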
@@ -1579,14 +1572,22 @@ out:
        return ret;
 }
 
+static void lrc_init_hws(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+       I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+                  (u32)engine->status_page.gfx_addr);
+       POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
+
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
        struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned int next_context_status_buffer_hw;
 
-       lrc_setup_hardware_status_page(engine,
-                                      dev_priv->kernel_context->engine[engine->id].state);
+       lrc_init_hws(engine);
 
        I915_WRITE_IMR(engine,
                       ~(engine->irq_enable_mask | engine->irq_keep_mask));
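The new lrc_init_hws() above ends with a posting read, a standard MMIO idiom: reading back the register just written forces the write out of any intermediate buffers before execution continues. A minimal sketch, with an invented register offset and a plain array standing in for the mapped BAR:

#include <stdint.h>

#define RING_HWS_PGA 0x2080u   /* made-up offset, not the real register */

static uint32_t fake_bar[0x1000];
static volatile uint32_t *mmio = fake_bar;   /* stands in for ioremap() */

static void write32(uint32_t reg, uint32_t val) { mmio[reg / 4] = val; }
static uint32_t read32(uint32_t reg) { return mmio[reg / 4]; }

static void init_hws(uint32_t gfx_addr)
{
        write32(RING_HWS_PGA, gfx_addr);
        (void)read32(RING_HWS_PGA);   /* posting read: flush the write */
}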
@@ -1625,7 +1626,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
        intel_engine_init_hangcheck(engine);
 
-       return 0;
+       return intel_mocs_init_engine(engine);
 }
 
 static int gen8_init_render_ring(struct intel_engine_cs *engine)
@@ -1945,15 +1946,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        int ret;
 
-       ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+       ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
        if (ret)
                return ret;
 
+       /* We're using qword write, seqno should be aligned to 8 bytes. */
+       BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
        /* w/a for post sync ops following a GPGPU operation we
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
-       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
        intel_logical_ring_emit(ringbuf,
                                (PIPE_CONTROL_GLOBAL_GTT_IVB |
                                 PIPE_CONTROL_CS_STALL |
@@ -1961,7 +1965,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
        intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       /* We're thrashing one dword of HWS. */
+       intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
        return intel_logical_ring_advance_and_submit(request);
 }
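This hunk switches request emission to a quad-word post-sync write, which is why the PIPE_CONTROL grows by a dword, the seqno slot must be 8-byte aligned, and one neighbouring HWS dword gets overwritten. A compilable sketch of the constraint, using an invented index rather than the real I915_GEM_HWS_INDEX:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* The status page as an array of dwords; the index is illustrative. */
#define HWS_SEQNO_INDEX 0x30
static uint32_t hws[1024];

/* A qword post-sync write lands on 8 bytes at once, so the dword index
 * must be even; this mirrors the patch's BUILD_BUG_ON at compile time. */
static_assert((HWS_SEQNO_INDEX & 1) == 0,
              "qword write needs an 8-byte aligned seqno slot");

static void write_seqno(uint32_t seqno)
{
        /* The hardware writes a full qword, clobbering ("thrashing")
         * the dword after the seqno, which therefore stays reserved. */
        uint64_t qword = seqno;   /* upper dword written as zero */

        memcpy(&hws[HWS_SEQNO_INDEX], &qword, sizeof(qword));
}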
 
@@ -2048,7 +2055,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
        i915_gem_batch_pool_fini(&engine->batch_pool);
 
        if (engine->status_page.obj) {
-               kunmap(sg_page(engine->status_page.obj->pages->sgl));
+               i915_gem_object_unpin_map(engine->status_page.obj);
                engine->status_page.obj = NULL;
        }
 
@@ -2086,10 +2093,30 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
        engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 }
 
+static int
+lrc_setup_hws(struct intel_engine_cs *engine,
+             struct drm_i915_gem_object *dctx_obj)
+{
+       void *hws;
+
+       /* The HWSP is part of the default context object in LRC mode. */
+       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
+                                      LRC_PPHWSP_PN * PAGE_SIZE;
+       hws = i915_gem_object_pin_map(dctx_obj);
+       if (IS_ERR(hws))
+               return PTR_ERR(hws);
+       engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
+       engine->status_page.obj = dctx_obj;
+
+       return 0;
+}
+
 static int
 logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
 {
-       struct intel_context *dctx = to_i915(dev)->kernel_context;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_context *dctx = dev_priv->kernel_context;
+       enum forcewake_domains fw_domains;
        int ret;
 
        /* Intentionally left blank. */
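The new lrc_setup_hws() computes two addresses for the same page: the GGTT offset the GPU will be told about and the CPU pointer derived from the pin_map() mapping. A small sketch with invented types and an illustrative page number:

#include <stdint.h>

#define PAGE_SIZE     4096u
#define LRC_PPHWSP_PN    1u   /* illustrative, not the driver's constant */

/* Invented, minimal view of a context object that is mapped twice:
 * once into the GGTT for the GPU and once into kernel space for us. */
struct ctx_obj {
        uint64_t ggtt_offset;   /* GPU-visible base of the object */
        void    *vaddr;         /* CPU base from a pin_map()-style call */
};

/* Both results address the same physical page; only the address space
 * differs, which is exactly what lrc_setup_hws() computes. */
static uint64_t hws_gfx_addr(const struct ctx_obj *obj)
{
        return obj->ggtt_offset + LRC_PPHWSP_PN * PAGE_SIZE;
}

static void *hws_cpu_addr(const struct ctx_obj *obj)
{
        return (char *)obj->vaddr + LRC_PPHWSP_PN * PAGE_SIZE;
}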
@@ -2111,6 +2138,20 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
 
        logical_ring_init_platform_invariants(engine);
 
+       fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+                                                   RING_ELSP(engine),
+                                                   FW_REG_WRITE);
+
+       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    RING_CONTEXT_STATUS_PTR(engine),
+                                                    FW_REG_READ | FW_REG_WRITE);
+
+       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    RING_CONTEXT_STATUS_BUF_BASE(engine),
+                                                    FW_REG_READ);
+
+       engine->fw_domains = fw_domains;
+
        ret = i915_cmd_parser_init_ring(engine);
        if (ret)
                goto error;
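Here the set of forcewake domains needed to reach the ELSP and context-status registers is computed once at engine init and cached, so the submission and interrupt paths earlier in this diff wake only those domains rather than FORCEWAKE_ALL. A sketch of the pattern, with made-up domain bits and register lookup:

#include <stdint.h>

/* Made-up forcewake domains and register map; the real driver derives
 * the set with intel_uncore_forcewake_for_reg(). */
enum { FW_RENDER = 1 << 0, FW_BLITTER = 1 << 1, FW_MEDIA = 1 << 2 };

struct engine {
        uint32_t elsp_reg, csb_ptr_reg, csb_buf_reg;
        unsigned int fw_domains;    /* cached union, filled in at init */
};

/* Stand-in lookup: which domains must be awake to touch a register. */
static unsigned int fw_for_reg(uint32_t reg)
{
        return reg < 0x10000 ? FW_RENDER : FW_MEDIA;
}

static void engine_init_fw_domains(struct engine *e)
{
        /* Compute the union once, so the hot paths can wake exactly
         * these domains instead of all of them. */
        e->fw_domains  = fw_for_reg(e->elsp_reg);
        e->fw_domains |= fw_for_reg(e->csb_ptr_reg);
        e->fw_domains |= fw_for_reg(e->csb_buf_reg);
}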
@@ -2128,6 +2169,13 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
                goto error;
        }
 
+       /* And setup the hardware status page. */
+       ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
+       if (ret) {
+               DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
+               goto error;
+       }
+
        return 0;
 
 error:
@@ -2378,15 +2426,16 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 }
 
 static int
-populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
+populate_lr_context(struct intel_context *ctx,
+                   struct drm_i915_gem_object *ctx_obj,
                    struct intel_engine_cs *engine,
                    struct intel_ringbuffer *ringbuf)
 {
        struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-       struct page *page;
-       uint32_t *reg_state;
+       void *vaddr;
+       u32 *reg_state;
        int ret;
 
        if (!ppgtt)
@@ -2398,18 +2447,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                return ret;
        }
 
-       ret = i915_gem_object_get_pages(ctx_obj);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Could not get object pages\n");
+       vaddr = i915_gem_object_pin_map(ctx_obj);
+       if (IS_ERR(vaddr)) {
+               ret = PTR_ERR(vaddr);
+               DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
                return ret;
        }
-
-       i915_gem_object_pin_pages(ctx_obj);
+       ctx_obj->dirty = true;
 
        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
-       page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-       reg_state = kmap_atomic(page);
+       reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
        /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
         * commands followed by (reg, value) pairs. The values we are setting here are
@@ -2514,8 +2562,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                               make_rpcs(dev));
        }
 
-       kunmap_atomic(reg_state);
-       i915_gem_object_unpin_pages(ctx_obj);
+       i915_gem_object_unpin_map(ctx_obj);
 
        return 0;
 }
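populate_lr_context() now writes the register state through the pin_map() address, offset to the state page and treated as an array of dwords laid out as (reg, value) pairs. A minimal sketch, with invented page number and dword index:

#include <stdint.h>

#define PAGE_SIZE 4096u
#define STATE_PN     1u   /* illustrative page number of the reg state */

/* Invented dword index: slot N holds a register offset and slot N + 1
 * its value, the (reg, value) pair layout described in the hunk above. */
#define CTX_RING_BUFFER_START 0x08

static void init_reg_state(void *vaddr, uint32_t ring_start)
{
        /* Step past the PPHWSP to the state page and view it as dwords,
         * as in reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE above. */
        uint32_t *reg_state =
                (uint32_t *)((char *)vaddr + STATE_PN * PAGE_SIZE);

        /* Write the value slot of the pair, hence the "+ 1". */
        reg_state[CTX_RING_BUFFER_START + 1] = ring_start;
}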
@@ -2542,6 +2589,7 @@ void intel_lr_context_free(struct intel_context *ctx)
                if (ctx == ctx->i915->kernel_context) {
                        intel_unpin_ringbuffer_obj(ringbuf);
                        i915_gem_object_ggtt_unpin(ctx_obj);
+                       i915_gem_object_unpin_map(ctx_obj);
                }
 
                WARN_ON(ctx->engine[i].pin_count);
@@ -2588,24 +2636,6 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
        return ret;
 }
 
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
-                                          struct drm_i915_gem_object *default_ctx_obj)
-{
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
-       struct page *page;
-
-       /* The HWSP is part of the default context object in LRC mode. */
-       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
-                       + LRC_PPHWSP_PN * PAGE_SIZE;
-       page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
-       engine->status_page.page_addr = kmap(page);
-       engine->status_page.obj = default_ctx_obj;
-
-       I915_WRITE(RING_HWS_PGA(engine->mmio_base),
-                       (u32)engine->status_page.gfx_addr);
-       POSTING_READ(RING_HWS_PGA(engine->mmio_base));
-}
-
 /**
  * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
@@ -2669,13 +2699,12 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                }
 
                ret = engine->init_context(req);
+               i915_add_request_no_flush(req);
                if (ret) {
                        DRM_ERROR("ring init context: %d\n",
                                ret);
-                       i915_gem_request_cancel(req);
                        goto error_ringbuf;
                }
-               i915_add_request_no_flush(req);
        }
        return 0;
 
@@ -2688,10 +2717,9 @@ error_deref_obj:
        return ret;
 }
 
-void intel_lr_context_reset(struct drm_device *dev,
-                       struct intel_context *ctx)
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
+                           struct intel_context *ctx)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv) {
@@ -2699,23 +2727,23 @@ void intel_lr_context_reset(struct drm_device *dev,
                                ctx->engine[engine->id].state;
                struct intel_ringbuffer *ringbuf =
                                ctx->engine[engine->id].ringbuf;
+               void *vaddr;
                uint32_t *reg_state;
-               struct page *page;
 
                if (!ctx_obj)
                        continue;
 
-               if (i915_gem_object_get_pages(ctx_obj)) {
-                       WARN(1, "Failed get_pages for context obj\n");
+               vaddr = i915_gem_object_pin_map(ctx_obj);
+               if (WARN_ON(IS_ERR(vaddr)))
                        continue;
-               }
-               page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-               reg_state = kmap_atomic(page);
+
+               reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+               ctx_obj->dirty = true;
 
                reg_state[CTX_RING_HEAD+1] = 0;
                reg_state[CTX_RING_TAIL+1] = 0;
 
-               kunmap_atomic(reg_state);
+               i915_gem_object_unpin_map(ctx_obj);
 
                ringbuf->head = 0;
                ringbuf->tail = 0;
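The reset path scrubs the ring pointers in both places that hold them: the dwords the engine reloads from the context image and the driver's own ringbuffer bookkeeping. A toy sketch of the two views, with invented types:

#include <stdint.h>

struct ctx_image  { uint32_t ring_head, ring_tail; };
struct ringbuffer { uint32_t head, tail; };

static void reset_ring(struct ctx_image *regs, struct ringbuffer *rb)
{
        regs->ring_head = 0;   /* what the engine will reload */
        regs->ring_tail = 0;
        rb->head = 0;          /* what the driver believes */
        rb->tail = 0;          /* both views must agree after a reset */
}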