drm/i915: Merged the many do_execbuf() parameters into a structure
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bd0e4bda2c649a54bb97a1b791ecb6d52e4438f1..c5f879e594c4cb76e3141b30dbf213e2ecfeca3a 100644
@@ -676,6 +676,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 static int
 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                            struct list_head *vmas,
+                           struct intel_context *ctx,
                            bool *need_relocs)
 {
        struct drm_i915_gem_object *obj;
@@ -698,6 +699,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                obj = vma->obj;
                entry = vma->exec_entry;
 
+               if (ctx->flags & CONTEXT_NO_ZEROMAP)
+                       entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
                if (!has_fenced_gpu_access)
                        entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
                need_fence =
@@ -775,7 +779,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_file *file,
                                  struct intel_engine_cs *ring,
                                  struct eb_vmas *eb,
-                                 struct drm_i915_gem_exec_object2 *exec)
+                                 struct drm_i915_gem_exec_object2 *exec,
+                                 struct intel_context *ctx)
 {
        struct drm_i915_gem_relocation_entry *reloc;
        struct i915_address_space *vm;
@@ -861,7 +866,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                goto err;
 
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
        if (ret)
                goto err;
 
@@ -952,6 +957,9 @@ validate_exec_list(struct drm_device *dev,
                if (exec[i].flags & invalid_flags)
                        return -EINVAL;
 
+               if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
+                       return -EINVAL;
+
                /* First check for malicious input causing overflow in
                 * the worst case where we need to allocate the entire
                 * relocation tree as a single array.
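
For reference, the new alignment check relies on is_power_of_2() from
include/linux/log2.h, which returns false for zero as well as for any
value with more than one bit set:

    /* include/linux/log2.h */
    static inline __attribute__((const))
    bool is_power_of_2(unsigned long n)
    {
            return (n != 0 && ((n & (n - 1)) == 0));
    }

An alignment of zero still passes validation, because the check above is
gated on exec[i].alignment being non-zero.
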
@@ -1033,7 +1041,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                        obj->dirty = 1;
                        i915_gem_request_assign(&obj->last_write_req, req);
 
-                       intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
+                       intel_fb_obj_invalidate(obj, ORIGIN_CS);
 
                        /* update for the implicit flush after a batch */
                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1061,7 +1069,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
        ring->gpu_caches_dirty = true;
 
        /* Add a breadcrumb for the completion of the batch buffer */
-       (void)__i915_add_request(ring, file, obj);
+       __i915_add_request(ring, file, obj);
 }
 
 static int
@@ -1185,17 +1193,15 @@ err:
 }
 
 int
-i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
-                              struct intel_engine_cs *ring,
-                              struct intel_context *ctx,
+i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
                               struct drm_i915_gem_execbuffer2 *args,
-                              struct list_head *vmas,
-                              struct drm_i915_gem_object *batch_obj,
-                              u64 exec_start, u32 dispatch_flags)
+                              struct list_head *vmas)
 {
        struct drm_clip_rect *cliprects = NULL;
+       struct drm_device *dev = params->dev;
+       struct intel_engine_cs *ring = params->ring;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u64 exec_len;
+       u64 exec_start, exec_len;
        int instp_mode;
        u32 instp_mask;
        int i, ret = 0;
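
The definition of struct i915_execbuffer_params lives in i915_drv.h and
is not part of this file's diff. A minimal sketch, reconstructed from
the fields this patch actually touches (not the authoritative
definition):

    /* Sketch only: fields inferred from their uses in this patch. */
    struct i915_execbuffer_params {
            struct drm_device               *dev;
            struct drm_file                 *file;
            u32                             dispatch_flags;
            u32                             args_batch_start_offset;
            u64                             batch_obj_vm_offset;
            struct intel_engine_cs          *ring;
            struct drm_i915_gem_object      *batch_obj;
            struct intel_context            *ctx;
    };
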
@@ -1247,11 +1253,11 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
        if (ret)
                goto error;
 
-       ret = i915_switch_context(ring, ctx);
+       ret = i915_switch_context(ring, params->ctx);
        if (ret)
                goto error;
 
-       WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+       WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
             "%s didn't clear reload\n", ring->name);
 
        instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
@@ -1312,7 +1318,10 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                        goto error;
        }
 
-       exec_len = args->batch_len;
+       exec_len   = args->batch_len;
+       exec_start = params->batch_obj_vm_offset +
+                    params->args_batch_start_offset;
+
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
                        ret = i915_emit_box(ring, &cliprects[i],
@@ -1322,22 +1331,23 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 
                        ret = ring->dispatch_execbuffer(ring,
                                                        exec_start, exec_len,
-                                                       dispatch_flags);
+                                                       params->dispatch_flags);
                        if (ret)
                                goto error;
                }
        } else {
                ret = ring->dispatch_execbuffer(ring,
                                                exec_start, exec_len,
-                                               dispatch_flags);
+                                               params->dispatch_flags);
                if (ret)
                        return ret;
        }
 
-       trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+       trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), params->dispatch_flags);
 
        i915_gem_execbuffer_move_to_active(vmas, ring);
-       i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+       i915_gem_execbuffer_retire_commands(params->dev, params->file, ring,
+                                           params->batch_obj);
 
 error:
        kfree(cliprects);
@@ -1407,8 +1417,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct intel_engine_cs *ring;
        struct intel_context *ctx;
        struct i915_address_space *vm;
+       struct i915_execbuffer_params params_master; /* XXX: will be removed later */
+       struct i915_execbuffer_params *params = &params_master;
        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-       u64 exec_start = args->batch_start_offset;
        u32 dispatch_flags;
        int ret;
        bool need_relocs;
@@ -1501,6 +1512,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        else
                vm = &dev_priv->gtt.base;
 
+       memset(&params_master, 0x00, sizeof(params_master));
+
        eb = eb_create(args);
        if (eb == NULL) {
                i915_gem_context_unreference(ctx);
@@ -1519,7 +1532,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
        if (ret)
                goto err;
 
@@ -1529,7 +1542,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-                                                               eb, exec);
+                                                               eb, exec, ctx);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
@@ -1543,6 +1556,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto err;
        }
 
+       params->args_batch_start_offset = args->batch_start_offset;
        if (i915_needs_cmd_parser(ring) && args->batch_len) {
                struct drm_i915_gem_object *parsed_batch_obj;
 
@@ -1574,7 +1588,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         * command parser has accepted.
                         */
                        dispatch_flags |= I915_DISPATCH_SECURE;
-                       exec_start = 0;
+                       params->args_batch_start_offset = 0;
                        batch_obj = parsed_batch_obj;
                }
        }
@@ -1599,14 +1613,31 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                if (ret)
                        goto err;
 
-               exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+               params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
        } else
-               exec_start += i915_gem_obj_offset(batch_obj, vm);
+               params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
-       ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
-                                         &eb->vmas, batch_obj, exec_start,
-                                         dispatch_flags);
+       /* Allocate a request for this batch buffer nice and early. */
+       ret = i915_gem_request_alloc(ring, ctx);
+       if (ret)
+               goto err_batch_unpin;
 
+       /*
+        * Save assorted stuff away to pass through to *_submission().
+        * NB: This data should be 'persistent' and not local as it will
+        * be kept around beyond the duration of the IOCTL once the GPU
+        * scheduler arrives.
+        */
+       params->dev                     = dev;
+       params->file                    = file;
+       params->ring                    = ring;
+       params->dispatch_flags          = dispatch_flags;
+       params->batch_obj               = batch_obj;
+       params->ctx                     = ctx;
+
+       ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+
+err_batch_unpin:
        /*
         * FIXME: We crucially rely upon the active tracking for the (ppgtt)
         * batch vma for correctness. For less ugly and less fragility this
@@ -1615,6 +1646,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         */
        if (dispatch_flags & I915_DISPATCH_SECURE)
                i915_gem_object_ggtt_unpin(batch_obj);
+
 err:
        /* the request owns the ref now */
        i915_gem_context_unreference(ctx);
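
The execbuf_submit vfunc (also declared in i915_drv.h, outside this
diff) has to shrink to match. Judging from the call site
dev_priv->gt.execbuf_submit(params, args, &eb->vmas) above, its
declaration becomes something along the lines of:

    /* Sketch inferred from the call site, not copied from source. */
    int (*execbuf_submit)(struct i915_execbuffer_params *params,
                          struct drm_i915_gem_execbuffer2 *args,
                          struct list_head *vmas);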