From 472db7ab3093bf2a2999f6b5aa64a030466d6f92 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Wed, 14 May 2014 15:42:29 +0200
Subject: [PATCH] drm/ttm: use rcu in core ttm

Signed-off-by: Maarten Lankhorst
---
 drivers/gpu/drm/ttm/ttm_bo.c | 76 ++++++------------------------------
 1 file changed, 13 insertions(+), 63 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 66707be386f7..a11969acfea5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -467,66 +467,6 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
 
-static int ttm_bo_unreserve_and_wait(struct ttm_buffer_object *bo,
-				     bool interruptible)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	struct reservation_object_list *fobj;
-	struct fence *excl = NULL;
-	struct fence **shared = NULL;
-	u32 shared_count = 0, i;
-	int ret = 0;
-
-	fobj = reservation_object_get_list(bo->resv);
-	if (fobj && fobj->shared_count) {
-		shared = kmalloc(sizeof(*shared) * fobj->shared_count,
-				 GFP_KERNEL);
-
-		if (!shared) {
-			ret = -ENOMEM;
-			__ttm_bo_unreserve(bo);
-			spin_unlock(&glob->lru_lock);
-			return ret;
-		}
-
-		for (i = 0; i < fobj->shared_count; ++i) {
-			if (!fence_is_signaled(fobj->shared[i])) {
-				fence_get(fobj->shared[i]);
-				shared[shared_count++] = fobj->shared[i];
-			}
-		}
-		if (!shared_count) {
-			kfree(shared);
-			shared = NULL;
-		}
-	}
-
-	excl = reservation_object_get_excl(bo->resv);
-	if (excl && !fence_is_signaled(excl))
-		fence_get(excl);
-	else
-		excl = NULL;
-
-	__ttm_bo_unreserve(bo);
-	spin_unlock(&glob->lru_lock);
-
-	if (excl) {
-		ret = fence_wait(excl, interruptible);
-		fence_put(excl);
-	}
-
-	if (shared_count > 0) {
-		for (i = 0; i < shared_count; ++i) {
-			if (!ret)
-				ret = fence_wait(shared[i], interruptible);
-			fence_put(shared[i]);
-		}
-		kfree(shared);
-	}
-
-	return ret;
-}
-
 /**
  * function ttm_bo_cleanup_refs_and_unlock
  * If bo idle, remove from delayed- and lru lists, and unref.
@@ -550,9 +490,19 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	ret = ttm_bo_wait(bo, false, false, true);
 
 	if (ret && !no_wait_gpu) {
-		ret = ttm_bo_unreserve_and_wait(bo, interruptible);
-		if (ret)
-			return ret;
+		long lret;
+		ww_mutex_unlock(&bo->resv->lock);
+		spin_unlock(&glob->lru_lock);
+
+		lret = reservation_object_wait_timeout_rcu(bo->resv,
+							   true,
+							   interruptible,
+							   30 * HZ);
+
+		if (lret < 0)
+			return lret;
+		else if (lret == 0)
+			return -EBUSY;
 
 		spin_lock(&glob->lru_lock);
 		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
-- 
2.34.1
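
Note (illustration only, not part of the patch): the change above drops the
hand-rolled fence collection in ttm_bo_unreserve_and_wait() in favour of a
single call to reservation_object_wait_timeout_rcu(), which walks the fence
list under RCU and therefore needs neither the reservation nor the lru_lock
while it waits. A minimal sketch of that calling pattern follows; the helper
name wait_for_bo_idle() is hypothetical and only shows how the long return
value is mapped onto an errno-style result, mirroring the lret handling in
the hunk above.

#include <linux/reservation.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Hypothetical helper: wait for every fence attached to a reservation
 * object, without holding any lock across the wait. */
static int wait_for_bo_idle(struct reservation_object *resv,
			    bool interruptible)
{
	long lret;

	/* wait_all = true waits on the exclusive fence and all shared
	 * fences; the fence list is traversed under RCU protection. */
	lret = reservation_object_wait_timeout_rcu(resv, true,
						   interruptible, 30 * HZ);
	if (lret < 0)
		return lret;	/* e.g. -ERESTARTSYS on a signal */
	if (lret == 0)
		return -EBUSY;	/* timed out, fences still pending */
	return 0;		/* all fences signaled */
}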