Merge tag 'v3.14' into drm-intel-next-queued
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 032def901f983e2e9b9ef8137a111b35a0ab4c72..3851a1b1dc88de14461f34cdeefaaf2469a77e95 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -544,19 +544,23 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
        struct drm_i915_gem_object *obj = vma->obj;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-       bool need_fence, need_mappable;
-       u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
-               !vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
+       bool need_fence;
+       unsigned flags;
        int ret;
 
+       flags = 0;
+
        need_fence =
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
-       need_mappable = need_fence || need_reloc_mappable(vma);
+       if (need_fence || need_reloc_mappable(vma))
+               flags |= PIN_MAPPABLE;
+
+       if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+               flags |= PIN_GLOBAL;
 
-       ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
-                                 false);
+       ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
        if (ret)
                return ret;
 
@@ -585,8 +589,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
        }
 
-       vma->bind_vma(vma, obj->cache_level, flags);
-
        return 0;
 }
 
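The hunk above replaces the old need_mappable boolean and GLOBAL_BIND plumbing with a single flags word that i915_gem_object_pin() now consumes directly; the explicit vma->bind_vma() call disappears because binding happens as part of the pin. A minimal, self-contained sketch of that flag-composition pattern follows; the PIN_* bit values, the struct layout and the helper names here are illustrative assumptions, not the driver's real definitions.

/*
 * Hypothetical sketch of the "collect pin constraints into one flags word"
 * pattern used in the hunk above.  PIN_MAPPABLE/PIN_GLOBAL are stand-in bit
 * definitions; the real driver keeps its own values in its headers.
 */
#include <stdbool.h>
#include <stdio.h>

#define PIN_MAPPABLE 0x1u   /* object must be reachable through the mappable aperture */
#define PIN_GLOBAL   0x2u   /* object must be bound in the global GTT */

/* Trimmed-down stand-in for the execbuffer entry state that matters here. */
struct exec_entry {
	bool needs_fence;    /* EXEC_OBJECT_NEEDS_FENCE, tiled, pre-gen4 */
	bool needs_gtt;      /* EXEC_OBJECT_NEEDS_GTT */
	bool reloc_mappable; /* relocation requires a CPU-visible mapping */
};

/* Fold the individual requirements into one flags word, as the new code does. */
static unsigned int compute_pin_flags(const struct exec_entry *entry)
{
	unsigned int flags = 0;

	if (entry->needs_fence || entry->reloc_mappable)
		flags |= PIN_MAPPABLE;
	if (entry->needs_gtt)
		flags |= PIN_GLOBAL;

	return flags;
}

int main(void)
{
	struct exec_entry e = { .needs_fence = true, .needs_gtt = true };

	/* Prints 0x3: both constraints travel in a single argument. */
	printf("pin flags: 0x%x\n", compute_pin_flags(&e));
	return 0;
}

Passing one bitmask instead of a growing list of booleans keeps the pin call's signature stable as new constraints are added, which is the point of the conversion shown above.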
@@ -1180,6 +1182,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
+       if (i915_needs_cmd_parser(ring)) {
+               ret = i915_parse_cmds(ring,
+                                     batch_obj,
+                                     args->batch_start_offset,
+                                     file->is_master);
+               if (ret)
+                       goto err;
+
+               /*
+                * XXX: Actually do this when enabling batch copy...
+                *
+                * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
+                * from MI_BATCH_BUFFER_START commands issued in the
+                * dispatch_execbuffer implementations. We specifically don't
+                * want that set when the command parser is enabled.
+                */
+       }
+
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
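The second hunk gates the new command parser: when i915_needs_cmd_parser() reports that the ring wants its batches scanned, i915_parse_cmds() runs over the batch object before submission and any error aborts the execbuffer. A rough, self-contained sketch of that gate-and-bail control flow follows; the names mirror the hunk, but the types, stubs and bodies here are mocked up purely for illustration and are not the driver's actual API.

/*
 * Hypothetical mock of the "parse the batch before dispatch" gate added in
 * the hunk above.  Only the control flow is meaningful; the stubs stand in
 * for the real i915_needs_cmd_parser()/i915_parse_cmds() implementations.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct ring  { bool wants_cmd_parser; };
struct batch { unsigned int start_offset; };

static bool needs_cmd_parser(const struct ring *ring)
{
	return ring->wants_cmd_parser;
}

/* Pretend scan: reject any batch that does not start at offset zero. */
static int parse_cmds(const struct ring *ring, const struct batch *batch,
		      bool is_master)
{
	(void)ring;
	(void)is_master;
	return batch->start_offset == 0 ? 0 : -EINVAL;
}

static int do_execbuffer(const struct ring *ring, const struct batch *batch,
			 bool is_master)
{
	int ret;

	if (needs_cmd_parser(ring)) {
		ret = parse_cmds(ring, batch, is_master);
		if (ret)
			return ret; /* reject the batch, nothing is submitted */
		/*
		 * As the XXX comment in the hunk notes, the secure-dispatch
		 * bit is deliberately left untouched here until batch copying
		 * is wired up.
		 */
	}

	printf("batch dispatched\n");
	return 0;
}

int main(void)
{
	struct ring r = { .wants_cmd_parser = true };
	struct batch b = { .start_offset = 0 };

	return do_execbuffer(&r, &b, true);
}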