drm/i915: Prefer to check for idleness in worker rather than sync-flush
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 84e2a231b03cb9cf4fc73621b235cfa92030111a..95e46c7490ab735519567b584857a27eca820367 100644
@@ -378,13 +378,13 @@ out:
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
+       return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       kmem_cache_free(dev_priv->slab, obj);
+       kmem_cache_free(dev_priv->objects, obj);
 }
 
 static int
@@ -1181,14 +1181,6 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
        return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
-static bool can_wait_boost(struct drm_i915_file_private *file_priv)
-{
-       if (file_priv == NULL)
-               return true;
-
-       return !atomic_xchg(&file_priv->rps_wait_boost, true);
-}
-
 /**
  * __i915_wait_request - wait until execution of request has finished
  * @req: the request to wait upon
@@ -1230,13 +1222,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
        timeout_expire = timeout ?
                jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
 
-       if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
-               gen6_rps_boost(dev_priv);
-               if (file_priv)
-                       mod_delayed_work(dev_priv->wq,
-                                        &file_priv->mm.idle_work,
-                                        msecs_to_jiffies(100));
-       }
+       if (INTEL_INFO(dev)->gen >= 6)
+               gen6_rps_boost(dev_priv, file_priv);
 
        if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;
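The open-coded wait-boost (an atomic flag per file plus a 100ms delayed work to clear it) is gone; gen6_rps_boost() now takes the file_priv and does its own once-per-client accounting. A hedged sketch of the counterpart in intel_pm.c, assuming it tracks boosting clients via the new per-file rps_boost list hooked into an rps.clients list (names beyond this call site are assumptions):

    void gen6_rps_boost(struct drm_i915_private *dev_priv,
                        struct drm_i915_file_private *file_priv)
    {
            u32 val;

            mutex_lock(&dev_priv->rps.hw_lock);
            val = dev_priv->rps.max_freq_softlimit;
            if (dev_priv->rps.enabled && dev_priv->rps.cur_freq < val &&
                (file_priv == NULL || list_empty(&file_priv->rps_boost))) {
                    intel_set_rps(dev_priv->dev, val);
                    /* One boost per client, until i915_gem_release() (or
                     * the RPS code) takes it back off the list. */
                    if (file_priv != NULL)
                            list_add(&file_priv->rps_boost,
                                     &dev_priv->rps.clients);
            }
            mutex_unlock(&dev_priv->rps.hw_lock);
    }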
@@ -2178,6 +2165,10 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
                return ret;
 
        list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+
+       obj->get_page.sg = obj->pages->sgl;
+       obj->get_page.last = 0;
+
        return 0;
 }
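i915_gem_object_get_pages() now primes a small iterator cache: obj->get_page remembers a scatterlist element and the object page index where it starts, so sequential page lookups resume from the last position instead of walking from sgl[0] on every call. A minimal sketch of a lookup built on that cache (the helper name and the omitted bounds check are illustrative):

    static struct page *
    lookup_page_cached(struct drm_i915_gem_object *obj, int n)
    {
            struct scatterlist *sg = obj->get_page.sg;
            int idx = obj->get_page.last;   /* first page index in sg */

            if (n < idx) {                  /* walked backwards: restart */
                    sg = obj->pages->sgl;
                    idx = 0;
            }

            while (idx + (sg->length >> PAGE_SHIFT) <= n) {
                    idx += sg->length >> PAGE_SHIFT;
                    sg = sg_next(sg);       /* follows chained tables too */
            }

            obj->get_page.sg = sg;
            obj->get_page.last = idx;
            return nth_page(sg_page(sg), n - idx);
    }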
 
@@ -2420,7 +2411,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
 
        i915_queue_hangcheck(ring->dev);
 
-       cancel_delayed_work_sync(&dev_priv->mm.idle_work);
        queue_delayed_work(dev_priv->wq,
                           &dev_priv->mm.retire_work,
                           round_jiffies_up_relative(HZ));
@@ -2515,7 +2505,46 @@ void i915_gem_request_free(struct kref *req_ref)
                i915_gem_context_unreference(ctx);
        }
 
-       kfree(req);
+       kmem_cache_free(req->i915->requests, req);
+}
+
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+                          struct intel_context *ctx)
+{
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       struct drm_i915_gem_request *rq;
+       int ret;
+
+       if (ring->outstanding_lazy_request)
+               return 0;
+
+       rq = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+       if (rq == NULL)
+               return -ENOMEM;
+
+       kref_init(&rq->ref);
+       rq->i915 = dev_priv;
+
+       ret = i915_gem_get_seqno(ring->dev, &rq->seqno);
+       if (ret) {
+               kmem_cache_free(dev_priv->requests, rq);
+               return ret;
+       }
+
+       rq->ring = ring;
+       rq->uniq = dev_priv->request_uniq++;
+
+       if (i915.enable_execlists)
+               ret = intel_logical_ring_alloc_request_extras(rq, ctx);
+       else
+               ret = intel_ring_alloc_request_extras(rq);
+       if (ret) {
+               kmem_cache_free(dev_priv->requests, rq);
+               return ret;
+       }
+
+       ring->outstanding_lazy_request = rq;
+       return 0;
 }
 
 struct drm_i915_gem_request *
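Requests are now allocated from, and must be returned to, the dev_priv->requests cache; note that the error paths above accordingly use kmem_cache_free() rather than kfree(), matching i915_gem_request_free(). A minimal sketch of the kref-based lifecycle this relies on (sketch only, helper names illustrative):

    /* The release callback hands the request back to its slab. */
    static void request_release(struct kref *ref)
    {
            struct drm_i915_gem_request *rq =
                    container_of(ref, struct drm_i915_gem_request, ref);

            kmem_cache_free(rq->i915->requests, rq);
    }

    static void request_put(struct drm_i915_gem_request *rq)
    {
            kref_put(&rq->ref, request_release);    /* frees on last put */
    }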
@@ -2577,7 +2606,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                                struct drm_i915_gem_request,
                                execlist_link);
                list_del(&submit_req->execlist_link);
-               intel_runtime_pm_put(dev_priv);
 
                if (submit_req->ctx != ring->default_context)
                        intel_lr_context_unpin(ring, submit_req->ctx);
@@ -2660,24 +2688,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
        WARN_ON(i915_verify_lists(ring->dev));
 
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate,
-        * before we free the context associated with the requests.
+       /* Retire requests first, as the request list is what the early
+        * return above checks. If we retired requests last, we could sample
+        * a later seqno and so clear the request list without clearing the
+        * active list, leading to confusion.
         */
-       while (!list_empty(&ring->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&ring->active_list,
-                                     struct drm_i915_gem_object,
-                                     ring_list);
-
-               if (!i915_gem_request_completed(obj->last_read_req, true))
-                       break;
-
-               i915_gem_object_move_to_inactive(obj);
-       }
-
-
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 
@@ -2700,6 +2715,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                i915_gem_free_request(request);
        }
 
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate,
+        * before we free the context associated with the requests.
+        */
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj;
+
+               obj = list_first_entry(&ring->active_list,
+                                     struct drm_i915_gem_object,
+                                     ring_list);
+
+               if (!i915_gem_request_completed(obj->last_read_req, true))
+                       break;
+
+               i915_gem_object_move_to_inactive(obj);
+       }
+
        if (unlikely(ring->trace_irq_req &&
                     i915_gem_request_completed(ring->trace_irq_req, true))) {
                ring->irq_put(ring);
@@ -2763,8 +2795,25 @@ i915_gem_idle_work_handler(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), mm.idle_work.work);
+       struct drm_device *dev = dev_priv->dev;
+       struct intel_engine_cs *ring;
+       int i;
+
+       for_each_ring(ring, dev_priv, i)
+               if (!list_empty(&ring->request_list))
+                       return;
+
+       intel_mark_idle(dev);
+
+       if (mutex_trylock(&dev->struct_mutex)) {
+               struct intel_engine_cs *ring;
+               int i;
+
+               for_each_ring(ring, dev_priv, i)
+                       i915_gem_batch_pool_fini(&ring->batch_pool);
 
-       intel_mark_idle(dev_priv->dev);
+               mutex_unlock(&dev->struct_mutex);
+       }
 }
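This hunk is the heart of the change: rather than __i915_add_request() synchronously flushing the idle worker (the cancel_delayed_work_sync() deleted above), the worker itself checks whether any ring still has requests outstanding and returns early if so. The batch-pool cleanup sits under mutex_trylock() so that an opportunistic idle check never blocks on struct_mutex. A self-contained sketch of the pattern, with all names illustrative:

    #include <linux/workqueue.h>
    #include <linux/mutex.h>
    #include <linux/list.h>

    struct gpu_state {
            struct delayed_work idle_work;
            struct list_head request_list;
            struct mutex lock;
    };

    static void idle_work_handler(struct work_struct *work)
    {
            struct gpu_state *gpu =
                    container_of(work, struct gpu_state, idle_work.work);

            /* New work arrived after we were queued: do nothing; the next
             * retirement re-queues the worker. No sync cancel required. */
            if (!list_empty(&gpu->request_list))
                    return;

            /* Never let an opportunistic idle check stall on the big lock. */
            if (!mutex_trylock(&gpu->lock))
                    return;

            /* ... power down hardware, drop per-engine caches ... */

            mutex_unlock(&gpu->lock);
    }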
 
 /**
@@ -2862,9 +2911,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        ret = __i915_wait_request(req, reset_counter, true,
                                  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
                                  file->driver_priv);
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_request_unreference(req);
-       mutex_unlock(&dev->struct_mutex);
+       i915_gem_request_unreference__unlocked(req);
        return ret;
 
 out:
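The lock/unreference/unlock dance disappears in favour of a helper that can drop the last reference without the caller holding struct_mutex. A hedged sketch, assuming it is built on kref_put_mutex(), which acquires the mutex only when the refcount actually hits zero:

    static inline void
    i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
    {
            struct drm_device *dev;

            if (req == NULL)
                    return;

            dev = req->ring->dev;
            if (kref_put_mutex(&req->ref, i915_gem_request_free,
                               &dev->struct_mutex))
                    mutex_unlock(&dev->struct_mutex);
    }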
@@ -3512,6 +3559,8 @@ search_free:
 
        /*  allocate before insert / bind */
        if (vma->vm->allocate_va_range) {
+               trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
+                               VM_TO_TRACE_NAME(vma->vm));
                ret = vma->vm->allocate_va_range(vma->vm,
                                                vma->node.start,
                                                vma->node.size);
@@ -3871,7 +3920,8 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
-                                    struct intel_engine_cs *pipelined)
+                                    struct intel_engine_cs *pipelined,
+                                    const struct i915_ggtt_view *view)
 {
        u32 old_read_domains, old_write_domain;
        bool was_pin_display;
@@ -3907,7 +3957,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
         * (e.g. libkms for the bootup splash), we have to ensure that we
         * always use map_and_fenceable for all scanout buffers.
         */
-       ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
+       ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+                                      view->type == I915_GGTT_VIEW_NORMAL ?
+                                      PIN_MAPPABLE : 0);
        if (ret)
                goto err_unpin_display;
 
@@ -3935,9 +3987,11 @@ err_unpin_display:
 }
 
 void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+                                        const struct i915_ggtt_view *view)
 {
-       i915_gem_object_ggtt_unpin(obj);
+       i915_gem_object_ggtt_unpin_view(obj, view);
+
        obj->pin_display = is_pin_display(obj);
 }
 
@@ -4060,9 +4114,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_request_unreference(target);
-       mutex_unlock(&dev->struct_mutex);
+       i915_gem_request_unreference__unlocked(target);
 
        return ret;
 }
@@ -4122,7 +4174,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 
                if (i915_vma_misplaced(vma, alignment, flags)) {
                        unsigned long offset;
-                       offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view->type) :
+                       offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
                                             i915_gem_obj_offset(obj, vm);
                        WARN(vma->pin_count,
                             "bo is already pinned in %s with incorrect alignment:"
@@ -4214,15 +4266,16 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 }
 
 void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+                               const struct i915_ggtt_view *view)
 {
-       struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+       struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
        BUG_ON(!vma);
-       BUG_ON(vma->pin_count == 0);
-       BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+       WARN_ON(vma->pin_count == 0);
+       WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
 
-       if (--vma->pin_count == 0)
+       if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
                obj->pin_mappable = false;
 }
 
@@ -4362,7 +4415,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);
-       INIT_LIST_HEAD(&obj->batch_pool_list);
+       INIT_LIST_HEAD(&obj->batch_pool_link);
 
        obj->ops = ops;
 
@@ -4542,7 +4595,8 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
                return ERR_PTR(-EINVAL);
 
        list_for_each_entry(vma, &obj->vma_list, vma_link)
-               if (vma->vm == ggtt && vma->ggtt_view.type == view->type)
+               if (vma->vm == ggtt &&
+                   i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma;
        return NULL;
 }
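VMA lookups now match views with i915_ggtt_view_equal() instead of comparing the bare type enum, leaving room for view types that carry parameters. A hedged sketch of a minimal implementation (a plain type comparison is presumably all that is needed at this point):

    static inline bool
    i915_ggtt_view_equal(const struct i915_ggtt_view *a,
                         const struct i915_ggtt_view *b)
    {
            return a->type == b->type;
    }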
@@ -4563,7 +4617,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 
        list_del(&vma->vma_link);
 
-       kfree(vma);
+       kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
 }
 
 static void
@@ -4850,12 +4904,12 @@ int i915_gem_init(struct drm_device *dev)
        }
 
        if (!i915.enable_execlists) {
-               dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
+               dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
                dev_priv->gt.init_rings = i915_gem_init_rings;
                dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
                dev_priv->gt.stop_ring = intel_stop_ring_buffer;
        } else {
-               dev_priv->gt.do_execbuf = intel_execlists_submission;
+               dev_priv->gt.execbuf_submit = intel_execlists_submission;
                dev_priv->gt.init_rings = intel_logical_rings_init;
                dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
                dev_priv->gt.stop_ring = intel_logical_ring_stop;
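The do_execbuf to execbuf_submit rename is mechanical, but it highlights the design: legacy ringbuffer and execlists submission sit behind one function-pointer table chosen once at init, so callers invoke dev_priv->gt.execbuf_submit() without knowing the backend. An illustrative shape for such a table (these simplified signatures are assumptions, not the driver's real prototypes):

    struct i915_gt_funcs {
            int  (*execbuf_submit)(struct drm_device *dev,
                                   struct drm_file *file,
                                   struct intel_engine_cs *ring);
            int  (*init_rings)(struct drm_device *dev);
            void (*cleanup_ring)(struct intel_engine_cs *ring);
            void (*stop_ring)(struct intel_engine_cs *ring);
    };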
@@ -4937,11 +4991,21 @@ i915_gem_load(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
-       dev_priv->slab =
+       dev_priv->objects =
                kmem_cache_create("i915_gem_object",
                                  sizeof(struct drm_i915_gem_object), 0,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
+       dev_priv->vmas =
+               kmem_cache_create("i915_gem_vma",
+                                 sizeof(struct i915_vma), 0,
+                                 SLAB_HWCACHE_ALIGN,
+                                 NULL);
+       dev_priv->requests =
+               kmem_cache_create("i915_gem_request",
+                                 sizeof(struct drm_i915_gem_request), 0,
+                                 SLAB_HWCACHE_ALIGN,
+                                 NULL);
 
        INIT_LIST_HEAD(&dev_priv->vm_list);
        i915_init_vm(dev_priv, &dev_priv->gtt.base);
@@ -4984,8 +5048,6 @@ i915_gem_load(struct drm_device *dev)
 
        i915_gem_shrinker_init(dev_priv);
 
-       i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
-
        mutex_init(&dev_priv->fb_tracking.lock);
 }
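Objects, VMAs and requests each get a dedicated slab, which buys tighter packing and per-type visibility in /proc/slabinfo; whoever creates these caches must also tear them down with kmem_cache_destroy() once every object is freed. A minimal standalone sketch of the lifecycle (demo names, not the driver's teardown path):

    #include <linux/module.h>
    #include <linux/slab.h>

    static struct kmem_cache *demo_cache;

    static int __init demo_init(void)
    {
            demo_cache = kmem_cache_create("demo_object", 64, 0,
                                           SLAB_HWCACHE_ALIGN, NULL);
            return demo_cache ? 0 : -ENOMEM;
    }

    static void __exit demo_exit(void)
    {
            /* Every allocation must have been kmem_cache_free()d by now. */
            kmem_cache_destroy(demo_cache);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");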
 
@@ -4993,8 +5055,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
-       cancel_delayed_work_sync(&file_priv->mm.idle_work);
-
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
@@ -5010,15 +5070,12 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
                request->file_priv = NULL;
        }
        spin_unlock(&file_priv->mm.lock);
-}
-
-static void
-i915_gem_file_idle_work_handler(struct work_struct *work)
-{
-       struct drm_i915_file_private *file_priv =
-               container_of(work, typeof(*file_priv), mm.idle_work.work);
 
-       atomic_set(&file_priv->rps_wait_boost, false);
+       if (!list_empty(&file_priv->rps_boost)) {
+               mutex_lock(&to_i915(dev)->rps.hw_lock);
+               list_del(&file_priv->rps_boost);
+               mutex_unlock(&to_i915(dev)->rps.hw_lock);
+       }
 }
 
 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
@@ -5035,11 +5092,10 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        file->driver_priv = file_priv;
        file_priv->dev_priv = dev->dev_private;
        file_priv->file = file;
+       INIT_LIST_HEAD(&file_priv->rps_boost);
 
        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);
-       INIT_DELAYED_WORK(&file_priv->mm.idle_work,
-                         i915_gem_file_idle_work_handler);
 
        ret = i915_gem_context_open(dev, file);
        if (ret)
@@ -5099,13 +5155,14 @@ i915_gem_obj_offset(struct drm_i915_gem_object *o,
 
 unsigned long
 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-                             enum i915_ggtt_view_type view)
+                             const struct i915_ggtt_view *view)
 {
        struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, vma_link)
-               if (vma->vm == ggtt && vma->ggtt_view.type == view)
+               if (vma->vm == ggtt &&
+                   i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma->node.start;
 
        WARN(1, "global vma for this object not found.\n");
@@ -5129,14 +5186,14 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 }
 
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
-                                 enum i915_ggtt_view_type view)
+                                 const struct i915_ggtt_view *view)
 {
        struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, vma_link)
                if (vma->vm == ggtt &&
-                   vma->ggtt_view.type == view &&
+                   i915_ggtt_view_equal(&vma->ggtt_view, view) &&
                    drm_mm_node_allocated(&vma->node))
                        return true;
 