static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine);
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
- struct drm_i915_gem_object *default_ctx_obj);
-
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
struct drm_i915_gem_request *rq1)
{
struct drm_i915_private *dev_priv = rq0->i915;
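+	/* fw_domains is the precomputed set of forcewake domains covering
+	 * the ELSP and CSB registers (see logical_ring_init()); taking just
+	 * these avoids waking every domain with FORCEWAKE_ALL.
+	 */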
+ unsigned int fw_domains = rq0->engine->fw_domains;
execlists_update_context(rq0);
execlists_update_context(rq1);
spin_lock_irq(&dev_priv->uncore.lock);
- intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
execlists_elsp_write(rq0, rq1);
- intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
spin_unlock_irq(&dev_priv->uncore.lock);
}
unsigned int csb_read = 0, i;
unsigned int submit_contexts = 0;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
engine->next_context_status_buffer << 8));
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
spin_lock(&engine->execlist_lock);
*/
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
- struct drm_i915_private *dev_priv;
int ret;
WARN_ON(req == NULL);
- dev_priv = req->i915;
-
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
- dev_priv->mm.interruptible);
- if (ret)
- return ret;
ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
if (ret)
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, params->request);
- i915_gem_execbuffer_retire_commands(params);
return 0;
}
return;
ret = intel_engine_idle(engine);
- if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
+ if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret);
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
- struct page *lrc_state_page;
- uint32_t *lrc_reg_state;
+ void *vaddr;
+ u32 *lrc_reg_state;
int ret;
WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
if (ret)
return ret;
- lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- if (WARN_ON(!lrc_state_page)) {
- ret = -ENODEV;
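+	/* Map the whole context object into the kernel's address space; the
+	 * mapping is dropped again when the context is unpinned.
+	 */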
+ vaddr = i915_gem_object_pin_map(ctx_obj);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
goto unpin_ctx_obj;
}
+ lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+
ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
if (ret)
- goto unpin_ctx_obj;
+ goto unpin_map;
ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
intel_lr_context_descriptor_update(ctx, engine);
- lrc_reg_state = kmap(lrc_state_page);
lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
ctx_obj->dirty = true;
return ret;
+unpin_map:
+ i915_gem_object_unpin_map(ctx_obj);
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
if (--ctx->engine[engine->id].pin_count == 0) {
- kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+ i915_gem_object_unpin_map(ctx_obj);
intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
ctx->engine[engine->id].lrc_vma = NULL;
return ret;
}
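+/* Tell the hardware where the status page lives; the GGTT address was
+ * computed in lrc_setup_hws().
+ */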
+static void lrc_init_hws(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+ I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+ (u32)engine->status_page.gfx_addr);
+ POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
+
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int next_context_status_buffer_hw;
- lrc_setup_hardware_status_page(engine,
- dev_priv->kernel_context->engine[engine->id].state);
+ lrc_init_hws(engine);
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask | engine->irq_keep_mask));
intel_engine_init_hangcheck(engine);
- return 0;
+ return intel_mocs_init_engine(engine);
}
static int gen8_init_render_ring(struct intel_engine_cs *engine)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
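+	/* 8 dwords: a 6-dword PIPE_CONTROL plus MI_USER_INTERRUPT and
+	 * MI_NOOP.
+	 */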
- ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+ ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
if (ret)
return ret;
+	/* We're using a qword write; the seqno should be aligned to 8 bytes. */
+ BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
	/* w/a: for post-sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
- intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf,
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	/* The upper dword of the qword write thrashes one extra dword of
+	 * the HWS; emit a dummy value for it.
+	 */
+	intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
return intel_logical_ring_advance_and_submit(request);
}
i915_gem_batch_pool_fini(&engine->batch_pool);
if (engine->status_page.obj) {
- kunmap(sg_page(engine->status_page.obj->pages->sgl));
+ i915_gem_object_unpin_map(engine->status_page.obj);
engine->status_page.obj = NULL;
}
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
+static int
+lrc_setup_hws(struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *dctx_obj)
+{
+ void *hws;
+
+ /* The HWSP is part of the default context object in LRC mode. */
+ engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
+ LRC_PPHWSP_PN * PAGE_SIZE;
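+	/* Keep the whole default context object mapped; the mapping is
+	 * released again in logical_ring_cleanup().
+	 */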
+ hws = i915_gem_object_pin_map(dctx_obj);
+ if (IS_ERR(hws))
+ return PTR_ERR(hws);
+ engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
+ engine->status_page.obj = dctx_obj;
+
+ return 0;
+}
+
static int
logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
{
- struct intel_context *dctx = to_i915(dev)->kernel_context;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_context *dctx = dev_priv->kernel_context;
+ enum forcewake_domains fw_domains;
int ret;
/* Intentionally left blank. */
logical_ring_init_platform_invariants(engine);
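+	/* Collect, once at init time, the union of forcewake domains
+	 * covering the registers used for submission: the ELSP is written,
+	 * the CSB pointer is read and written, and the CSB buffer is read.
+	 */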
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+ RING_ELSP(engine),
+ FW_REG_WRITE);
+
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_PTR(engine),
+ FW_REG_READ | FW_REG_WRITE);
+
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_BUF_BASE(engine),
+ FW_REG_READ);
+
+ engine->fw_domains = fw_domains;
+
ret = i915_cmd_parser_init_ring(engine);
if (ret)
goto error;
goto error;
}
+	/* And set up the hardware status page. */
+ ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
+ if (ret) {
+ DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
+ goto error;
+ }
+
return 0;
error:
}
static int
-populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
+populate_lr_context(struct intel_context *ctx,
+ struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *engine,
struct intel_ringbuffer *ringbuf)
{
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
- struct page *page;
- uint32_t *reg_state;
+ void *vaddr;
+ u32 *reg_state;
int ret;
if (!ppgtt)
return ret;
}
- ret = i915_gem_object_get_pages(ctx_obj);
- if (ret) {
- DRM_DEBUG_DRIVER("Could not get object pages\n");
+ vaddr = i915_gem_object_pin_map(ctx_obj);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
-
- i915_gem_object_pin_pages(ctx_obj);
+ ctx_obj->dirty = true;
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
- page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- reg_state = kmap_atomic(page);
+ reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
* commands followed by (reg, value) pairs. The values we are setting here are
make_rpcs(dev));
}
- kunmap_atomic(reg_state);
- i915_gem_object_unpin_pages(ctx_obj);
+ i915_gem_object_unpin_map(ctx_obj);
return 0;
}
if (ctx == ctx->i915->kernel_context) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
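+		/* The kernel context is pinned for its whole lifetime, so
+		 * the mapping taken in intel_lr_context_pin() is only
+		 * dropped here.
+		 */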
+ i915_gem_object_unpin_map(ctx_obj);
}
WARN_ON(ctx->engine[i].pin_count);
return ret;
}
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
- struct drm_i915_gem_object *default_ctx_obj)
-{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
- struct page *page;
-
- /* The HWSP is part of the default context object in LRC mode. */
- engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
- + LRC_PPHWSP_PN * PAGE_SIZE;
- page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
- engine->status_page.page_addr = kmap(page);
- engine->status_page.obj = default_ctx_obj;
-
- I915_WRITE(RING_HWS_PGA(engine->mmio_base),
- (u32)engine->status_page.gfx_addr);
- POSTING_READ(RING_HWS_PGA(engine->mmio_base));
-}
-
/**
 * intel_lr_context_deferred_alloc() - create the LRC-specific bits of a context
* @ctx: LR context to create.
}
ret = engine->init_context(req);
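+	/* Once allocated, a request can no longer be cancelled, so it
+	 * must be added even if init_context() failed.
+	 */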
+ i915_add_request_no_flush(req);
if (ret) {
DRM_ERROR("ring init context: %d\n",
ret);
- i915_gem_request_cancel(req);
goto error_ringbuf;
}
- i915_add_request_no_flush(req);
}
return 0;
return ret;
}
-void intel_lr_context_reset(struct drm_device *dev,
- struct intel_context *ctx)
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
+ struct intel_context *ctx)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv) {
ctx->engine[engine->id].state;
struct intel_ringbuffer *ringbuf =
ctx->engine[engine->id].ringbuf;
+ void *vaddr;
uint32_t *reg_state;
- struct page *page;
if (!ctx_obj)
continue;
- if (i915_gem_object_get_pages(ctx_obj)) {
- WARN(1, "Failed get_pages for context obj\n");
+ vaddr = i915_gem_object_pin_map(ctx_obj);
+ if (WARN_ON(IS_ERR(vaddr)))
continue;
- }
- page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- reg_state = kmap_atomic(page);
+
+ reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ ctx_obj->dirty = true;
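+		/* Zero the ring registers saved in the context image so the
+		 * ring restarts from the beginning after the reset.
+		 */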
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL+1] = 0;
- kunmap_atomic(reg_state);
+ i915_gem_object_unpin_map(ctx_obj);
ringbuf->head = 0;
ringbuf->tail = 0;