drm/ttm: add the infrastructure for pipelined evictions
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 4194b7ea7a725f7cb5413cda4a6ed46cc09f0d45..0c389a54cac13d756c4ccbc9efe49fc08d5504be 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -320,7 +320,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                      bool evict, bool no_wait_gpu,
+                      bool evict, bool interruptible,
+                      bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -336,6 +337,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
        unsigned long add = 0;
        int dir;
 
+       ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+       if (ret)
+               return ret;
+
        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
@@ -400,7 +405,7 @@ out2:
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
 
-       if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
+       if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }
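
ttm_bo_move_memcpy() now waits for the BO's outstanding GPU work itself, honouring the new interruptible flag, and the ttm != NULL check can go because ttm_tt_destroy() copes with a NULL pointer. A minimal sketch of how a driver fallback path would call the updated function; the driver callback and its helper are hypothetical names, not part of this patch:

/* Hypothetical driver ->move() callback: try an accelerated copy first,
 * then fall back to the CPU copy with the new argument order. */
static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
			 bool interruptible, bool no_wait_gpu,
			 struct ttm_mem_reg *new_mem)
{
	int r;

	/* mydrv_bo_move_blit() is a placeholder for the driver's GPU copy path. */
	r = mydrv_bo_move_blit(bo, evict, new_mem, &bo->mem);
	if (r) {
		/* The wait on the BO's fences now happens inside
		 * ttm_bo_move_memcpy() and honours "interruptible". */
		r = ttm_bo_move_memcpy(bo, evict, interruptible,
				       no_wait_gpu, new_mem);
	}
	return r;
}
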
@@ -460,6 +465,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
+       fbo->moving = NULL;
        drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);
 
@@ -632,7 +638,6 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct fence *fence,
                              bool evict,
-                             bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -647,8 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                if (ret)
                        return ret;
 
-               if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
-                   (bo->ttm != NULL)) {
+               if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
@@ -662,7 +666,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                 * operation has completed.
                 */
 
-               set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+               fence_put(bo->moving);
+               bo->moving = fence_get(fence);
 
                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
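
ttm_bo_move_accel_cleanup() loses its no_wait_gpu parameter; the fence is recorded in bo->moving instead of setting the TTM_BO_PRIV_FLAG_MOVING bit, and the old placement is handed to a ghost object rather than waited on. A hedged sketch of a driver's accelerated move path against the new signature; mydrv_bo_move_blit() and mydrv_copy_buffer() are placeholder names:

static int mydrv_bo_move_blit(struct ttm_buffer_object *bo, bool evict,
			      struct ttm_mem_reg *new_mem,
			      struct ttm_mem_reg *old_mem)
{
	struct fence *fence;
	int r;

	/* Submit the GPU copy and get back its completion fence;
	 * mydrv_copy_buffer() is a placeholder. */
	r = mydrv_copy_buffer(bo, old_mem, new_mem, &fence);
	if (r)
		return r;

	/* No no_wait_gpu argument any more; the fence ends up in
	 * bo->moving and on the ghost object. */
	r = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
	fence_put(fence);
	return r;
}
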
@@ -691,3 +696,95 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
        return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+
+int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
+                        struct fence *fence, bool evict,
+                        struct ttm_mem_reg *new_mem)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_mem_reg *old_mem = &bo->mem;
+
+       struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
+       struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
+
+       int ret;
+
+       reservation_object_add_excl_fence(bo->resv, fence);
+
+       if (!evict) {
+               struct ttm_buffer_object *ghost_obj;
+
+               /**
+                * This should help pipeline ordinary buffer moves.
+                *
+                * Hang old buffer memory on a new buffer object,
+                * and leave it to be released when the GPU
+                * operation has completed.
+                */
+
+               fence_put(bo->moving);
+               bo->moving = fence_get(fence);
+
+               ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+               if (ret)
+                       return ret;
+
+               reservation_object_add_excl_fence(ghost_obj->resv, fence);
+
+               /**
+                * If we're not moving to fixed memory, the TTM object
+                * needs to stay alive. Otherwise hang it on the ghost
+                * bo to be unbound and destroyed.
+                */
+
+               if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
+                       ghost_obj->ttm = NULL;
+               else
+                       bo->ttm = NULL;
+
+               ttm_bo_unreserve(ghost_obj);
+               ttm_bo_unref(&ghost_obj);
+
+       } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+
+               /**
+                * The BO doesn't have a TTM that we need to bind/unbind. Just
+                * remember this eviction and free up the old allocation.
+                */
+
+               spin_lock(&from->move_lock);
+               if (!from->move || fence_is_later(from->move, fence)) {
+                       fence_put(from->move);
+                       from->move = fence_get(fence);
+               }
+               spin_unlock(&from->move_lock);
+
+               ttm_bo_free_old_node(bo);
+
+               fence_put(bo->moving);
+               bo->moving = fence_get(fence);
+
+       } else {
+               /**
+                * Last resort, wait for the move to be completed.
+                *
+                * Should never happen in practice.
+                */
+
+               ret = ttm_bo_wait(bo, false, false);
+               if (ret)
+                       return ret;
+
+               if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
+                       ttm_tt_destroy(bo->ttm);
+                       bo->ttm = NULL;
+               }
+               ttm_bo_free_old_node(bo);
+       }
+
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+
+       return 0;
+}
+EXPORT_SYMBOL(ttm_bo_pipeline_move);
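
For evictions a driver can opt into the pipelined path: submit the copy as before, but hand the fence to ttm_bo_pipeline_move(), which hangs the old placement on a ghost object or remembers the fence on the source memory manager instead of blocking. A rough sketch under the same placeholder names as above:

static int mydrv_bo_move_pipelined(struct ttm_buffer_object *bo, bool evict,
				   struct ttm_mem_reg *new_mem,
				   struct ttm_mem_reg *old_mem)
{
	struct fence *fence;
	int r;

	/* mydrv_copy_buffer() is a placeholder for the driver's copy submission. */
	r = mydrv_copy_buffer(bo, old_mem, new_mem, &fence);
	if (r)
		return r;

	/* No blocking wait: TTM either parks the old placement on a
	 * ghost object or records the fence on the source manager, so
	 * the freed space is only reused once the copy has signalled. */
	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	fence_put(fence);
	return r;
}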