X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fi915%2Fi915_gem_execbuffer.c;h=c5f879e594c4cb76e3141b30dbf213e2ecfeca3a;hb=5f19e2bffa63a91cd4ac1adcec648e14a44277ce;hp=a3190e793ed43744980bedba4ed42e1d0e38d597;hpb=27cf3a16b2535a490f8cf1d29a6634f1c70f7831;p=deliverable%2Flinux.git

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a3190e793ed4..c5f879e594c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -37,7 +37,6 @@
 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
 #define __EXEC_OBJECT_NEEDS_MAP (1<<29)
 #define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
-#define __EXEC_OBJECT_PURGEABLE (1<<27)
 
 #define BATCH_OFFSET_BIAS (256*1024)
 
@@ -224,12 +223,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
 		vma->pin_count--;
 
-	if (entry->flags & __EXEC_OBJECT_PURGEABLE)
-		obj->madv = I915_MADV_DONTNEED;
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
-			  __EXEC_OBJECT_HAS_PIN |
-			  __EXEC_OBJECT_PURGEABLE);
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
 
 static void eb_destroy(struct eb_vmas *eb)
@@ -406,10 +400,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	 * pipe_control writes because the gpu doesn't properly redirect them
 	 * through the ppgtt for non_secure batchbuffers. */
 	if (unlikely(IS_GEN6(dev) &&
-	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-	    !(target_vma->bound & GLOBAL_BIND))) {
+	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
 		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
-				    GLOBAL_BIND);
+				    PIN_GLOBAL);
 		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
 			return ret;
 	}
@@ -591,12 +584,13 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	uint64_t flags;
 	int ret;
 
-	flags = 0;
+	flags = PIN_USER;
+	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+		flags |= PIN_GLOBAL;
+
 	if (!drm_mm_node_allocated(&vma->node)) {
 		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
 			flags |= PIN_GLOBAL | PIN_MAPPABLE;
-		if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
-			flags |= PIN_GLOBAL;
 		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
 			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 	}
@@ -606,7 +600,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	    only_mappable_for_reloc(entry->flags))
 		ret = i915_gem_object_pin(obj, vma->vm,
 					  entry->alignment,
-					  flags & ~(PIN_GLOBAL | PIN_MAPPABLE));
+					  flags & ~PIN_MAPPABLE);
 	if (ret)
 		return ret;
 
@@ -682,6 +676,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 static int
 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			    struct list_head *vmas,
+			    struct intel_context *ctx,
 			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
@@ -704,6 +699,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 		obj = vma->obj;
 		entry = vma->exec_entry;
 
+		if (ctx->flags & CONTEXT_NO_ZEROMAP)
+			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
 		if (!has_fenced_gpu_access)
 			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 		need_fence =
@@ -781,7 +779,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_file *file,
 				  struct intel_engine_cs *ring,
 				  struct eb_vmas *eb,
-				  struct drm_i915_gem_exec_object2 *exec)
+				  struct drm_i915_gem_exec_object2 *exec,
+				  struct intel_context *ctx)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct i915_address_space *vm;
@@ -867,7 +866,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -895,6 +894,7 @@ static int
 i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 				struct list_head *vmas)
 {
+	const unsigned other_rings = ~intel_ring_flag(ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -902,9 +902,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
-		ret = i915_gem_object_sync(obj, ring);
-		if (ret)
-			return ret;
+
+		if (obj->active & other_rings) {
+			ret = i915_gem_object_sync(obj, ring);
+			if (ret)
+				return ret;
+		}
 
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			flush_chipset |= i915_gem_clflush_object(obj, false);
@@ -954,6 +957,9 @@ validate_exec_list(struct drm_device *dev,
 		if (exec[i].flags & invalid_flags)
 			return -EINVAL;
 
+		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
+			return -EINVAL;
+
 		/* First check for malicious input causing overflow in
 		 * the worst case where we need to allocate the entire
 		 * relocation tree as a single array.
@@ -1035,7 +1041,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 			obj->dirty = 1;
 			i915_gem_request_assign(&obj->last_write_req, req);
 
-			intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
+			intel_fb_obj_invalidate(obj, ORIGIN_CS);
 
 			/* update for the implicit flush after a batch */
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1063,7 +1069,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 	ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	(void)__i915_add_request(ring, file, obj);
+	__i915_add_request(ring, file, obj);
 }
 
 static int
@@ -1142,12 +1148,11 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 			  u32 batch_len,
 			  bool is_master)
 {
-	struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
 	struct drm_i915_gem_object *shadow_batch_obj;
 	struct i915_vma *vma;
 	int ret;
 
-	shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
+	shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
 						   PAGE_ALIGN(batch_len));
 	if (IS_ERR(shadow_batch_obj))
 		return shadow_batch_obj;
@@ -1165,11 +1170,13 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 	if (ret)
 		goto err;
 
+	i915_gem_object_unpin_pages(shadow_batch_obj);
+
 	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
 	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
 	vma->exec_entry = shadow_exec_entry;
-	vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
 	drm_gem_object_reference(&shadow_batch_obj->base);
 	list_add_tail(&vma->exec_list, &eb->vmas);
 
@@ -1178,6 +1185,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 	return shadow_batch_obj;
 
 err:
+	i915_gem_object_unpin_pages(shadow_batch_obj);
 	if (ret == -EACCES) /* unhandled chained batch */
 		return batch_obj;
 	else
@@ -1185,17 +1193,15 @@ err:
 }
 
 int
-i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
-			       struct intel_context *ctx,
+i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas,
-			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 dispatch_flags)
+			       struct list_head *vmas)
 {
 	struct drm_clip_rect *cliprects = NULL;
+	struct drm_device *dev = params->dev;
+	struct intel_engine_cs *ring = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 exec_len;
+	u64 exec_start, exec_len;
 	int instp_mode;
 	u32 instp_mask;
 	int i, ret = 0;
@@ -1247,16 +1253,12 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 	if (ret)
 		goto error;
 
-	ret = i915_switch_context(ring, ctx);
+	ret = i915_switch_context(ring, params->ctx);
 	if (ret)
 		goto error;
 
-	if (ctx->ppgtt)
-		WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
-		     "%s didn't clear reload\n", ring->name);
-	else if (dev_priv->mm.aliasing_ppgtt)
-		WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
-		     (1<<ring->id), "%s didn't clear reload\n", ring->name);
+	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+	     "%s didn't clear reload\n", ring->name);
 
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	instp_mask = I915_EXEC_CONSTANTS_MASK;
@@ -1316,7 +1318,10 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 			goto error;
 	}
 
-	exec_len = args->batch_len;
+	exec_len   = args->batch_len;
+	exec_start = params->batch_obj_vm_offset +
+		     params->args_batch_start_offset;
+
 	if (cliprects) {
 		for (i = 0; i < args->num_cliprects; i++) {
 			ret = i915_emit_box(ring, &cliprects[i],
@@ -1326,22 +1331,23 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 
 			ret = ring->dispatch_execbuffer(ring,
 							exec_start, exec_len,
-							dispatch_flags);
+							params->dispatch_flags);
 			if (ret)
 				goto error;
 		}
 	} else {
 		ret = ring->dispatch_execbuffer(ring,
 						exec_start, exec_len,
-						dispatch_flags);
+						params->dispatch_flags);
 		if (ret)
 			return ret;
 	}
 
-	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), params->dispatch_flags);
 
 	i915_gem_execbuffer_move_to_active(vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+	i915_gem_execbuffer_retire_commands(params->dev, params->file, ring,
+					    params->batch_obj);
 
 error:
 	kfree(cliprects);
@@ -1411,8 +1417,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct intel_engine_cs *ring;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
+	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
+	struct i915_execbuffer_params *params = &params_master;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-	u64 exec_start = args->batch_start_offset;
 	u32 dispatch_flags;
 	int ret;
 	bool need_relocs;
@@ -1505,6 +1512,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	else
 		vm = &dev_priv->gtt.base;
 
+	memset(&params_master, 0x00, sizeof(params_master));
+
 	eb = eb_create(args);
 	if (eb == NULL) {
 		i915_gem_context_unreference(ctx);
@@ -1523,7 +1532,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1533,7 +1542,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-								eb, exec);
+								eb, exec, ctx);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
 		if (ret)
 			goto err;
@@ -1547,34 +1556,41 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
+	params->args_batch_start_offset = args->batch_start_offset;
 	if (i915_needs_cmd_parser(ring) && args->batch_len) {
-		batch_obj = i915_gem_execbuffer_parse(ring,
+		struct drm_i915_gem_object *parsed_batch_obj;
+
+		parsed_batch_obj = i915_gem_execbuffer_parse(ring,
 						      &shadow_exec_entry,
 						      eb,
 						      batch_obj,
 						      args->batch_start_offset,
 						      args->batch_len,
 						      file->is_master);
-		if (IS_ERR(batch_obj)) {
-			ret = PTR_ERR(batch_obj);
+		if (IS_ERR(parsed_batch_obj)) {
+			ret = PTR_ERR(parsed_batch_obj);
 			goto err;
 		}
 
 		/*
-		 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
-		 * bit from MI_BATCH_BUFFER_START commands issued in the
-		 * dispatch_execbuffer implementations. We specifically
-		 * don't want that set when the command parser is
-		 * enabled.
-		 *
-		 * FIXME: with aliasing ppgtt, buffers that should only
-		 * be in ggtt still end up in the aliasing ppgtt. remove
-		 * this check when that is fixed.
+		 * parsed_batch_obj == batch_obj means batch not fully parsed:
+		 * Accept, but don't promote to secure.
 		 */
-		if (USES_FULL_PPGTT(dev))
-			dispatch_flags |= I915_DISPATCH_SECURE;
 
-		exec_start = 0;
+		if (parsed_batch_obj != batch_obj) {
+			/*
+			 * Batch parsed and accepted:
+			 *
+			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+			 * bit from MI_BATCH_BUFFER_START commands issued in
+			 * the dispatch_execbuffer implementations. We
+			 * specifically don't want that set on batches the
+			 * command parser has accepted.
+			 */
+			dispatch_flags |= I915_DISPATCH_SECURE;
+			params->args_batch_start_offset = 0;
+			batch_obj = parsed_batch_obj;
+		}
 	}
 
 	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
@@ -1597,14 +1613,31 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (ret)
 			goto err;
 
-		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
 	} else
-		exec_start += i915_gem_obj_offset(batch_obj, vm);
+		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
-	ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
-				      &eb->vmas, batch_obj, exec_start,
-				      dispatch_flags);
+	/* Allocate a request for this batch buffer nice and early. */
+	ret = i915_gem_request_alloc(ring, ctx);
+	if (ret)
+		goto err_batch_unpin;
+
+	/*
+	 * Save assorted stuff away to pass through to *_submission().
+	 * NB: This data should be 'persistent' and not local as it will
+	 * kept around beyond the duration of the IOCTL once the GPU
+	 * scheduler arrives.
+	 */
+	params->dev                     = dev;
+	params->file                    = file;
+	params->ring                    = ring;
+	params->dispatch_flags          = dispatch_flags;
+	params->batch_obj               = batch_obj;
+	params->ctx                     = ctx;
+
+	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 
+err_batch_unpin:
 	/*
 	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
 	 * batch vma for correctness. For less ugly and less fragility this
@@ -1613,6 +1646,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 */
 	if (dispatch_flags & I915_DISPATCH_SECURE)
 		i915_gem_object_ggtt_unpin(batch_obj);
+
 err:
 	/* the request owns the ref now */
 	i915_gem_context_unreference(ctx);