drm/i915/gtt: Allow >= 4GB sizes for vm.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4625a2fdc180f4504a2575fca4d7b1d65ab5ec76..db1955fad00595103ac78c7eb6203f79a6749b1b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1149,20 +1149,6 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
        return 0;
 }
 
-/*
- * Compare arbitrary request against outstanding lazy request. Emit on match.
- */
-int
-i915_gem_check_olr(struct drm_i915_gem_request *req)
-{
-       WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
-
-       if (req == req->ring->outstanding_lazy_request)
-               i915_add_request(req->ring);
-
-       return 0;
-}
-
 static void fake_irq(unsigned long data)
 {
        wake_up_process((struct task_struct *)data);
@@ -1334,6 +1320,33 @@ out:
        return ret;
 }
 
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+                                  struct drm_file *file)
+{
+       struct drm_i915_private *dev_private;
+       struct drm_i915_file_private *file_priv;
+
+       WARN_ON(!req || !file || req->file_priv);
+
+       if (!req || !file)
+               return -EINVAL;
+
+       if (req->file_priv)
+               return -EINVAL;
+
+       dev_private = req->ring->dev->dev_private;
+       file_priv = file->driver_priv;
+
+       spin_lock(&file_priv->mm.lock);
+       req->file_priv = file_priv;
+       list_add_tail(&req->client_list, &file_priv->mm.request_list);
+       spin_unlock(&file_priv->mm.lock);
+
+       req->pid = get_pid(task_pid(current));
+
+       return 0;
+}
+
 static inline void
 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
@@ -1346,6 +1359,9 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
+
+       put_pid(request->pid);
+       request->pid = NULL;
 }
 
 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
@@ -1365,8 +1381,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
        list_del_init(&request->list);
        i915_gem_request_remove_from_client(request);
 
-       put_pid(request->pid);
-
        i915_gem_request_unreference(request);
 }
 
@@ -1415,10 +1429,6 @@ i915_wait_request(struct drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       ret = i915_gem_check_olr(req);
-       if (ret)
-               return ret;
-
        ret = __i915_wait_request(req,
                                  atomic_read(&dev_priv->gpu_error.reset_counter),
                                  interruptible, NULL, NULL);
@@ -1518,10 +1528,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                if (req == NULL)
                        return 0;
 
-               ret = i915_gem_check_olr(req);
-               if (ret)
-                       goto err;
-
                requests[n++] = i915_gem_request_reference(req);
        } else {
                for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -1531,10 +1537,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                        if (req == NULL)
                                continue;
 
-                       ret = i915_gem_check_olr(req);
-                       if (ret)
-                               goto err;
-
                        requests[n++] = i915_gem_request_reference(req);
                }
        }
@@ -1545,7 +1547,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                                          NULL, rps);
        mutex_lock(&dev->struct_mutex);
 
-err:
        for (i = 0; i < n; i++) {
                if (ret == 0)
                        i915_gem_object_retire_request(obj, requests[i]);
@@ -2340,9 +2341,12 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct intel_engine_cs *ring)
+                            struct drm_i915_gem_request *req)
 {
        struct drm_i915_gem_object *obj = vma->obj;
+       struct intel_engine_cs *ring;
+
+       ring = i915_gem_request_get_ring(req);
 
        /* Add a reference if we're newly entering the active list. */
        if (obj->active == 0)
@@ -2350,8 +2354,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
        obj->active |= intel_ring_flag(ring);
 
        list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-       i915_gem_request_assign(&obj->last_read_req[ring->id],
-                               intel_ring_get_request(ring));
+       i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
        list_move_tail(&vma->mm_list, &vma->vm->active_list);
 }
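
[Annotation, not part of the patch] i915_vma_move_to_active() now takes the request that is actually doing the rendering and derives the engine from it; the request is also what ends up assigned to obj->last_read_req. The real call sites live in the execbuffer and context code (not in this file); a call-site sketch of the expected change:

	/* old: the engine was passed, the request was fetched implicitly */
	i915_vma_move_to_active(vma, ring);

	/* new: the request carries the engine and is tracked directly */
	i915_vma_move_to_active(vma, req);
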
@@ -2468,25 +2471,22 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  * request is not being tracked for completion but the work itself is
  * going to happen on the hardware. This would be a Bad Thing(tm).
  */
-void __i915_add_request(struct intel_engine_cs *ring,
-                       struct drm_file *file,
+void __i915_add_request(struct drm_i915_gem_request *request,
                        struct drm_i915_gem_object *obj,
                        bool flush_caches)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct drm_i915_gem_request *request;
+       struct intel_engine_cs *ring;
+       struct drm_i915_private *dev_priv;
        struct intel_ringbuffer *ringbuf;
        u32 request_start;
        int ret;
 
-       request = ring->outstanding_lazy_request;
        if (WARN_ON(request == NULL))
                return;
 
-       if (i915.enable_execlists) {
-               ringbuf = request->ctx->engine[ring->id].ringbuf;
-       } else
-               ringbuf = ring->buffer;
+       ring = request->ring;
+       dev_priv = ring->dev->dev_private;
+       ringbuf = request->ringbuf;
 
        /*
         * To ensure that this call will not fail, space for its emissions
@@ -2505,9 +2505,9 @@ void __i915_add_request(struct intel_engine_cs *ring,
         */
        if (flush_caches) {
                if (i915.enable_execlists)
-                       ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
+                       ret = logical_ring_flush_all_caches(request);
                else
-                       ret = intel_ring_flush_all_caches(ring);
+                       ret = intel_ring_flush_all_caches(request);
                /* Not allowed to fail! */
                WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
        }
@@ -2520,9 +2520,9 @@ void __i915_add_request(struct intel_engine_cs *ring,
        request->postfix = intel_ring_get_tail(ringbuf);
 
        if (i915.enable_execlists)
-               ret = ring->emit_request(ringbuf, request);
+               ret = ring->emit_request(request);
        else {
-               ret = ring->add_request(ring);
+               ret = ring->add_request(request);
 
                request->tail = intel_ring_get_tail(ringbuf);
        }
@@ -2541,22 +2541,8 @@ void __i915_add_request(struct intel_engine_cs *ring,
 
        request->emitted_jiffies = jiffies;
        list_add_tail(&request->list, &ring->request_list);
-       request->file_priv = NULL;
-
-       if (file) {
-               struct drm_i915_file_private *file_priv = file->driver_priv;
-
-               spin_lock(&file_priv->mm.lock);
-               request->file_priv = file_priv;
-               list_add_tail(&request->client_list,
-                             &file_priv->mm.request_list);
-               spin_unlock(&file_priv->mm.lock);
-
-               request->pid = get_pid(task_pid(current));
-       }
 
        trace_i915_gem_request_add(request);
-       ring->outstanding_lazy_request = NULL;
 
        i915_queue_hangcheck(ring->dev);
 
@@ -2620,6 +2606,9 @@ void i915_gem_request_free(struct kref *req_ref)
                                                 typeof(*req), ref);
        struct intel_context *ctx = req->ctx;
 
+       if (req->file_priv)
+               i915_gem_request_remove_from_client(req);
+
        if (ctx) {
                if (i915.enable_execlists) {
                        struct intel_engine_cs *ring = req->ring;
@@ -2645,8 +2634,7 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
        if (!req_out)
                return -EINVAL;
 
-       if ((*req_out = ring->outstanding_lazy_request) != NULL)
-               return 0;
+       *req_out = NULL;
 
        req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
        if (req == NULL)
@@ -2677,21 +2665,22 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
         * i915_add_request() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
-        *
-        * Note further that this call merely notes the reserve request. A
-        * subsequent call to *_ring_begin() is required to actually ensure
-        * that the reservation is available. Without the begin, if the
-        * request creator immediately submitted the request without adding
-        * any commands to it then there might not actually be sufficient
-        * room for the submission commands. Unfortunately, the current
-        * *_ring_begin() implementations potentially call back here to
-        * i915_gem_request_alloc(). Thus calling _begin() here would lead to
-        * infinite recursion! Until that back call path is removed, it is
-        * necessary to do a manual _begin() outside.
         */
-       intel_ring_reserved_space_reserve(req->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+       if (i915.enable_execlists)
+               ret = intel_logical_ring_reserve_space(req);
+       else
+               ret = intel_ring_reserve_space(req);
+       if (ret) {
+               /*
+                * At this point, the request is fully allocated even if not
+                * fully prepared. Thus it can be cleaned up using the proper
+                * free code.
+                */
+               i915_gem_request_cancel(req);
+               return ret;
+       }
 
-       *req_out = ring->outstanding_lazy_request = req;
+       *req_out = req;
        return 0;
 
 err:
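
[Annotation, not part of the patch] The reserve performed at allocation time is what lets the later i915_add_request() be treated as a no-fail path: intel_ring_begin() only has to account for the caller's own payload, while the space needed to close out the request was set aside up front. A minimal sketch of a command-emitting caller on the legacy (non-execlists) path, under stated assumptions: example_emit() is hypothetical and the MI_NOOPs stand in for real payload.

/* Hypothetical command emitter, illustrating the reservation contract. */
static int example_emit(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	/* Only the caller's own dwords need to be checked here. */
	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* Cannot run out of ring space: that was reserved at alloc time. */
	i915_add_request(req);
	return 0;
}
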
@@ -2788,9 +2777,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 
                i915_gem_request_retire(request);
        }
-
-       /* This may not have been flushed before the reset, so clean it now */
-       i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
@@ -2973,7 +2959,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 static int
 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 {
-       int ret, i;
+       int i;
 
        if (!obj->active)
                return 0;
@@ -2988,10 +2974,6 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
                if (list_empty(&req->list))
                        goto retire;
 
-               ret = i915_gem_check_olr(req);
-               if (ret)
-                       return ret;
-
                if (i915_gem_request_completed(req, true)) {
                        __i915_gem_request_retire__upto(req);
 retire:
@@ -3094,25 +3076,22 @@ out:
 static int
 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
                       struct intel_engine_cs *to,
-                      struct drm_i915_gem_request *req)
+                      struct drm_i915_gem_request *from_req,
+                      struct drm_i915_gem_request **to_req)
 {
        struct intel_engine_cs *from;
        int ret;
 
-       from = i915_gem_request_get_ring(req);
+       from = i915_gem_request_get_ring(from_req);
        if (to == from)
                return 0;
 
-       if (i915_gem_request_completed(req, true))
+       if (i915_gem_request_completed(from_req, true))
                return 0;
 
-       ret = i915_gem_check_olr(req);
-       if (ret)
-               return ret;
-
        if (!i915_semaphore_is_enabled(obj->base.dev)) {
                struct drm_i915_private *i915 = to_i915(obj->base.dev);
-               ret = __i915_wait_request(req,
+               ret = __i915_wait_request(from_req,
                                          atomic_read(&i915->gpu_error.reset_counter),
                                          i915->mm.interruptible,
                                          NULL,
@@ -3120,16 +3099,24 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
                if (ret)
                        return ret;
 
-               i915_gem_object_retire_request(obj, req);
+               i915_gem_object_retire_request(obj, from_req);
        } else {
                int idx = intel_ring_sync_index(from, to);
-               u32 seqno = i915_gem_request_get_seqno(req);
+               u32 seqno = i915_gem_request_get_seqno(from_req);
+
+               WARN_ON(!to_req);
 
                if (seqno <= from->semaphore.sync_seqno[idx])
                        return 0;
 
-               trace_i915_gem_ring_sync_to(from, to, req);
-               ret = to->semaphore.sync_to(to, from, seqno);
+               if (*to_req == NULL) {
+                       ret = i915_gem_request_alloc(to, to->default_context, to_req);
+                       if (ret)
+                               return ret;
+               }
+
+               trace_i915_gem_ring_sync_to(*to_req, from, from_req);
+               ret = to->semaphore.sync_to(*to_req, from, seqno);
                if (ret)
                        return ret;
 
@@ -3149,11 +3136,14 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  *
  * @obj: object which may be in use on another ring.
  * @to: ring we wish to use the object on. May be NULL.
+ * @to_req: request we wish to use the object for. See below.
+ *          This will be allocated and returned if a request is
+ *          required but not passed in.
  *
  * This code is meant to abstract object synchronization with the GPU.
  * Calling with NULL implies synchronizing the object with the CPU
  * rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow on engine to write
+ * between engines inside the GPU. We only allow one engine to write
  * into a buffer at any time, but multiple readers. To ensure each has
  * a coherent view of memory, we must:
  *
@@ -3164,11 +3154,22 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  * - If we are a write request (pending_write_domain is set), the new
  *   request must wait for outstanding read requests to complete.
  *
+ * For CPU synchronisation (NULL to) no request is required. For syncing with
+ * rings to_req must be non-NULL. However, a request does not have to be
+ * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
+ * request will be allocated automatically and returned through *to_req. Note
+ * that it is not guaranteed that commands will be emitted (because the system
+ * might already be idle). Hence there is no need to create a request that
+ * might never have any work submitted. Note further that if a request is
+ * returned in *to_req, it is the responsibility of the caller to submit
+ * that request (after potentially adding more work to it).
+ *
  * Returns 0 if successful, else propagates up the lower layer error.
  */
 int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                    struct intel_engine_cs *to)
+                    struct intel_engine_cs *to,
+                    struct drm_i915_gem_request **to_req)
 {
        const bool readonly = obj->base.pending_write_domain == 0;
        struct drm_i915_gem_request *req[I915_NUM_RINGS];
@@ -3190,7 +3191,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
                                req[n++] = obj->last_read_req[i];
        }
        for (i = 0; i < n; i++) {
-               ret = __i915_gem_object_sync(obj, to, req[i]);
+               ret = __i915_gem_object_sync(obj, to, req[i], to_req);
                if (ret)
                        return ret;
        }
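
[Annotation, not part of the patch] From the caller's side, the *to_req contract documented above works out to: pass a pointer to a NULL request, and if semaphore commands had to be emitted a request comes back that the caller must eventually submit. A minimal sketch under stated assumptions: example_sync_to_ring() is hypothetical and i915_add_request() is assumed to take the request, as the *_no_flush variant does in this diff.

/* Hypothetical caller, illustrating the *to_req ownership rule. */
static int example_sync_to_ring(struct drm_i915_gem_object *obj,
				struct intel_engine_cs *to)
{
	struct drm_i915_gem_request *to_req = NULL;
	int ret;

	ret = i915_gem_object_sync(obj, to, &to_req);
	if (ret)
		return ret;

	/* A request only exists if sync commands were actually emitted;
	 * the caller owns its submission (possibly after adding more work). */
	if (to_req)
		i915_add_request(to_req);

	return 0;
}
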
@@ -3315,11 +3316,9 @@ int i915_gpu_idle(struct drm_device *dev)
                                return ret;
                        }
 
-                       i915_add_request_no_flush(req->ring);
+                       i915_add_request_no_flush(req);
                }
 
-               WARN_ON(ring->outstanding_lazy_request);
-
                ret = intel_ring_idle(ring);
                if (ret)
                        return ret;
@@ -3713,9 +3712,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
-       unsigned long start =
+       u64 start =
                flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-       unsigned long end =
+       u64 end =
                flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
        struct i915_vma *vma;
        int ret;
@@ -3771,7 +3770,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
         * attempt to find space.
         */
        if (size > end) {
-               DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+               DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
                          ggtt_view ? ggtt_view->type : 0,
                          size,
                          flags & PIN_MAPPABLE ? "mappable" : "total",
@@ -4140,12 +4139,13 @@ int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     struct intel_engine_cs *pipelined,
+                                    struct drm_i915_gem_request **pipelined_request,
                                     const struct i915_ggtt_view *view)
 {
        u32 old_read_domains, old_write_domain;
        int ret;
 
-       ret = i915_gem_object_sync(obj, pipelined);
+       ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
        if (ret)
                return ret;
 
@@ -4299,6 +4299,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
 
+               /*
+                * Note that the request might not have been submitted yet.
+                * In which case emitted_jiffies will be zero.
+                */
+               if (!request->emitted_jiffies)
+                       continue;
+
                target = request;
        }
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
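
[Annotation, not part of the patch] The !request->emitted_jiffies guard is needed because requests now join file_priv->mm.request_list when i915_gem_request_add_to_client() runs, i.e. at allocation/execbuffer time, whereas emitted_jiffies is only set later in __i915_add_request(). The throttle walk can therefore see not-yet-submitted requests and must skip them rather than pick one as the wait target (this rationale is inferred from the two changes in this patch, not spelled out in the hunk).
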
@@ -4856,8 +4863,9 @@ err:
        return ret;
 }
 
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
+       struct intel_engine_cs *ring = req->ring;
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
@@ -4867,7 +4875,7 @@ int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
        if (!HAS_L3_DPF(dev) || !remap_info)
                return 0;
 
-       ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+       ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
        if (ret)
                return ret;
 
@@ -5079,7 +5087,7 @@ i915_gem_init_hw(struct drm_device *dev)
 
                if (ring->id == RCS) {
                        for (j = 0; j < NUM_L3_SLICES(dev); j++)
-                               i915_gem_l3_remap(ring, j);
+                               i915_gem_l3_remap(req, j);
                }
 
                ret = i915_ppgtt_init_ring(req);
@@ -5098,7 +5106,7 @@ i915_gem_init_hw(struct drm_device *dev)
                        goto out;
                }
 
-               i915_add_request_no_flush(ring);
+               i915_add_request_no_flush(req);
        }
 
 out: