drm/i915/bdw: Clean up execlist queue items in retire_work
author     Thomas Daniel <thomas.daniel@intel.com>
           Thu, 13 Nov 2014 10:27:05 +0000 (10:27 +0000)
committer  Daniel Vetter <daniel.vetter@ffwll.ch>
           Wed, 19 Nov 2014 18:16:45 +0000 (19:16 +0100)
No longer create a work item to clean up each execlist queue item.
Instead, move retired execlist requests onto a per-ring queue and clean
them up during retire_requests.
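
For illustration, a minimal self-contained sketch of the splice-and-drain
pattern the patch adopts; every name below is a hypothetical stand-in,
not the driver's actual code:

    /*
     * The interrupt-time path moves a finished item onto a retired
     * list under a spinlock; the retire path splices that list out
     * under the same lock and frees the items with the spinlock
     * dropped, so cleanup may take sleeping locks.
     */
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct engine_state {
            spinlock_t lock;                /* protects both lists */
            struct list_head queue;         /* work still in flight */
            struct list_head retired;       /* done, not yet freed */
    };

    struct queued_item {
            struct list_head link;
    };

    /* Called with e->lock held, possibly from interrupt context. */
    static void mark_retired(struct engine_state *e, struct queued_item *item)
    {
            list_move_tail(&item->link, &e->retired);
    }

    /* Called from process context, e.g. a periodic retire handler. */
    static void drain_retired(struct engine_state *e)
    {
            LIST_HEAD(local);
            struct queued_item *item, *tmp;
            unsigned long flags;

            spin_lock_irqsave(&e->lock, flags);
            list_replace_init(&e->retired, &local);
            spin_unlock_irqrestore(&e->lock, flags);

            /* No spinlock held: sleeping operations are safe here. */
            list_for_each_entry_safe(item, tmp, &local, link) {
                    list_del(&item->link);
                    kfree(item);
            }
    }

In the patch itself the drain is intel_execlists_retire_requests(),
running with struct_mutex already held by the retire path, which is what
lets the per-request work items and their deferred struct_mutex/runtime-pm
handling go away.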

v2: Fix the legacy ring path, broken during overzealous cleanup.

v3: Update idle detection to take the execlists queue into account.

v4: Grab the execlist lock when checking queue state.

v5: Fix leaking requests by freeing them in execlists_retire_requests.
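
On v3/v4: the execlist queue is also manipulated from the context-event
interrupt handler, so the idle check added to i915_gem_retire_requests()
has to sample it under the irq-safe execlist_lock. The patch open-codes
that check; written as a standalone helper (hypothetical, for clarity):

    static bool execlists_queue_empty(struct intel_engine_cs *ring)
    {
            unsigned long flags;
            bool empty;

            /*
             * execlist_queue is written from the interrupt path
             * (intel_execlists_handle_ctx_events), so take the same
             * irq-safe lock for a meaningful emptiness check.
             */
            spin_lock_irqsave(&ring->execlist_lock, flags);
            empty = list_empty(&ring->execlist_queue);
            spin_unlock_irqrestore(&ring->execlist_lock, flags);

            return empty;
    }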

Issue: VIZ-4274
Signed-off-by: Thomas Daniel <thomas.daniel@intel.com>
Reviewed-by: Deepak S <deepak.s@linux.intel.com>
Reviewed-by: Akash Goel <akash.goels@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 86cf428b6c4e251bcdc7c4351cc1e95753789d84..2e85ef1650bfab5c1d534dacc092ae4b79b5fbce 100644
@@ -2800,6 +2800,15 @@ i915_gem_retire_requests(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i) {
                i915_gem_retire_requests_ring(ring);
                idle &= list_empty(&ring->request_list);
+               if (i915.enable_execlists) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&ring->execlist_lock, flags);
+                       idle &= list_empty(&ring->execlist_queue);
+                       spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+                       intel_execlists_retire_requests(ring);
+               }
        }
 
        if (idle)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3cf15c4da0e871960975f76bd078f4143d27d2b9..c855051ba18d65cb37abdafff37f28b6db1d70d8 100644
@@ -399,7 +399,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 {
        struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
        struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
        assert_spin_locked(&ring->execlist_lock);
 
@@ -416,7 +415,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
                         * will update tail past first request's workload */
                        cursor->elsp_submitted = req0->elsp_submitted;
                        list_del(&req0->execlist_link);
-                       queue_work(dev_priv->wq, &req0->work);
+                       list_add_tail(&req0->execlist_link,
+                               &ring->execlist_retired_req_list);
                        req0 = cursor;
                } else {
                        req1 = cursor;
@@ -438,7 +438,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 static bool execlists_check_remove_request(struct intel_engine_cs *ring,
                                           u32 request_id)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_ctx_submit_request *head_req;
 
        assert_spin_locked(&ring->execlist_lock);
@@ -456,7 +455,8 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
 
                        if (--head_req->elsp_submitted <= 0) {
                                list_del(&head_req->execlist_link);
-                               queue_work(dev_priv->wq, &head_req->work);
+                               list_add_tail(&head_req->execlist_link,
+                                       &ring->execlist_retired_req_list);
                                return true;
                        }
                }
@@ -525,22 +525,6 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
                   ((u32)ring->next_context_status_buffer & 0x07) << 8);
 }
 
-static void execlists_free_request_task(struct work_struct *work)
-{
-       struct intel_ctx_submit_request *req =
-               container_of(work, struct intel_ctx_submit_request, work);
-       struct drm_device *dev = req->ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       intel_runtime_pm_put(dev_priv);
-
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_context_unreference(req->ctx);
-       mutex_unlock(&dev->struct_mutex);
-
-       kfree(req);
-}
-
 static int execlists_context_queue(struct intel_engine_cs *ring,
                                   struct intel_context *to,
                                   u32 tail)
@@ -557,7 +541,6 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
        i915_gem_context_reference(req->ctx);
        req->ring = ring;
        req->tail = tail;
-       INIT_WORK(&req->work, execlists_free_request_task);
 
        intel_runtime_pm_get(dev_priv);
 
@@ -578,7 +561,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
                        WARN(tail_req->elsp_submitted != 0,
                             "More than 2 already-submitted reqs queued\n");
                        list_del(&tail_req->execlist_link);
-                       queue_work(dev_priv->wq, &tail_req->work);
+                       list_add_tail(&tail_req->execlist_link,
+                               &ring->execlist_retired_req_list);
                }
        }
 
@@ -746,6 +730,30 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
        return 0;
 }
 
+void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+{
+       struct intel_ctx_submit_request *req, *tmp;
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       unsigned long flags;
+       struct list_head retired_list;
+
+       WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+       if (list_empty(&ring->execlist_retired_req_list))
+               return;
+
+       INIT_LIST_HEAD(&retired_list);
+       spin_lock_irqsave(&ring->execlist_lock, flags);
+       list_replace_init(&ring->execlist_retired_req_list, &retired_list);
+       spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+       list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
+               intel_runtime_pm_put(dev_priv);
+               i915_gem_context_unreference(req->ctx);
+               list_del(&req->execlist_link);
+               kfree(req);
+       }
+}
+
 void intel_logical_ring_stop(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1301,6 +1309,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
        init_waitqueue_head(&ring->irq_queue);
 
        INIT_LIST_HEAD(&ring->execlist_queue);
+       INIT_LIST_HEAD(&ring->execlist_retired_req_list);
        spin_lock_init(&ring->execlist_lock);
        ring->next_context_status_buffer = 0;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 33c3b4bf28c56322c77235575d12fd83532f947b..84bbf1916324a5b06e1052f59d91a4943dc729b7 100644
@@ -104,11 +104,11 @@ struct intel_ctx_submit_request {
        u32 tail;
 
        struct list_head execlist_link;
-       struct work_struct work;
 
        int elsp_submitted;
 };
 
 void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+void intel_execlists_retire_requests(struct intel_engine_cs *ring);
 
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index aab2e2f90a74b262f5c4a7baf0d92b7fbc221292..85156567044b167e29ea6a79f545a9bd44ab5921 100644
@@ -236,6 +236,7 @@ struct  intel_engine_cs {
        /* Execlists */
        spinlock_t execlist_lock;
        struct list_head execlist_queue;
+       struct list_head execlist_retired_req_list;
        u8 next_context_status_buffer;
        u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int             (*emit_request)(struct intel_ringbuffer *ringbuf);