drm/ttm: allow fence to be added as shared
author    Christian König <christian.koenig@amd.com>
Thu, 4 Sep 2014 18:01:52 +0000 (20:01 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 11 Sep 2014 14:46:00 +0000 (10:46 -0400)
This patch adds a new flag to the ttm_validate_buffer list to
add the fence as shared to the reservation object.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
include/drm/ttm/ttm_execbuf_util.h

index a6e19c83143e0ce2fba2d97ba1fc37e921f59f41..446e71ca36cb111c5d47aad5b4928eb6548a68c5 100644 (file)
@@ -226,6 +226,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 
        qxl_bo_ref(bo);
        entry->tv.bo = &bo->tbo;
+       entry->tv.shared = false;
        list_add_tail(&entry->tv.head, &release->bos);
        return 0;
 }
index 6e3d1c8f34832a3e9cacedeca71c1e9f44b011c7..cd517ab936085c55eda7c14324c5604e35ec4883 100644 (file)
@@ -183,6 +183,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                }
 
                p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
+               p->relocs[i].tv.shared = false;
                p->relocs[i].handle = r->handle;
 
                radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
index 671ee566aa51d6d29bf86f67c3f209612f5f34f3..1cce4468cd7567513a659bc8b2125639a45f775d 100644 (file)
@@ -143,6 +143,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
        list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].tv.bo = &vm->page_directory->tbo;
+       list[0].tv.shared = false;
        list[0].tiling_flags = 0;
        list[0].handle = 0;
        list_add(&list[0].tv.head, head);
@@ -156,6 +157,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
                list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].tv.bo = &list[idx].robj->tbo;
+               list[idx].tv.shared = false;
                list[idx].tiling_flags = 0;
                list[idx].handle = 0;
                list_add(&list[idx++].tv.head, head);
@@ -395,6 +397,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 
         memset(&tv, 0, sizeof(tv));
         tv.bo = &bo->tbo;
+       tv.shared = false;
 
         INIT_LIST_HEAD(&head);
         list_add(&tv.head, &head);
index adafc0f8ec065fb76a2e8388b26c381d2ed88bd7..8ce508e76208a0f5ce8c12b1da9e54111aac7ad6 100644 (file)
@@ -119,8 +119,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                        ret = -EBUSY;
                }
 
-               if (!ret)
-                       continue;
+               if (!ret) {
+                       if (!entry->shared)
+                               continue;
+
+                       ret = reservation_object_reserve_shared(bo->resv);
+                       if (!ret)
+                               continue;
+               }
 
                /* uh oh, we lost out, drop every reservation and try
                 * to only reserve this buffer, then start over if
@@ -136,6 +142,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                        ret = 0;
                }
 
+               if (!ret && entry->shared)
+                       ret = reservation_object_reserve_shared(bo->resv);
+
                if (unlikely(ret != 0)) {
                        if (ret == -EINTR)
                                ret = -ERESTARTSYS;
@@ -183,7 +192,10 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 
        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
-               reservation_object_add_excl_fence(bo->resv, fence);
+               if (entry->shared)
+                       reservation_object_add_shared_fence(bo->resv, fence);
+               else
+                       reservation_object_add_excl_fence(bo->resv, fence);
                ttm_bo_add_to_lru(bo);
                __ttm_bo_unreserve(bo);
        }
index 0ceaddc8e4f74993453bd82ed2b7673d409f0b04..b4de3b2a7cc58d5bf193a80d367eae3703ba6e26 100644 (file)
@@ -346,6 +346,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(bo);
+               val_buf->shared = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }
@@ -2670,9 +2671,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
        INIT_LIST_HEAD(&validate_list);
 
        pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+       pinned_val.shared = false;
        list_add_tail(&pinned_val.head, &validate_list);
 
        query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+       query_val.shared = false;
        list_add_tail(&query_val.head, &validate_list);
 
        ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
index ff0e03b97753e95c3742ecf4da1d3dd8d0dab992..26584316cb789002ff73982f73620056d13f9ada 100644 (file)
@@ -133,6 +133,7 @@ static void vmw_resource_release(struct kref *kref)
                        struct ttm_validate_buffer val_buf;
 
                        val_buf.bo = bo;
+                       val_buf.shared = false;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
@@ -1219,6 +1220,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 
        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
+       val_buf->shared = false;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
        if (unlikely(ret != 0))
@@ -1312,6 +1314,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
        BUG_ON(!func->may_evict);
 
        val_buf.bo = NULL;
+       val_buf.shared = false;
        ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;
@@ -1357,6 +1360,7 @@ int vmw_resource_validate(struct vmw_resource *res)
                return 0;
 
        val_buf.bo = NULL;
+       val_buf.shared = false;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
@@ -1474,6 +1478,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                struct ttm_validate_buffer val_buf;
 
                val_buf.bo = bo;
+               val_buf.shared = false;
 
                list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
 
index ff11a424f752f112d27eea5afb546f861454a767..460441714413c620b8313ca9390778e089257651 100644 (file)
  *
  * @head:           list head for thread-private list.
  * @bo:             refcounted buffer object pointer.
+ * @shared:         should the fence be added shared?
  */
 
 struct ttm_validate_buffer {
        struct list_head head;
        struct ttm_buffer_object *bo;
+       bool shared;
 };
 
 /**
This page took 0.029422 seconds and 5 git commands to generate.