drm/nouveau: require reservations for nouveau_fence_sync and nouveau_bo_fence
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4ab9f7171c4ff04a539a8bde7fc8a9dec2b9b817..b992ec3c318a2bb7ddb0ca5064d374186cb6bbd6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -53,12 +53,13 @@ static struct attribute ttm_bo_count = {
        .mode = S_IRUGO
 };
 
-static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+static inline int ttm_mem_type_from_place(const struct ttm_place *place,
+                                         uint32_t *mem_type)
 {
        int i;
 
        for (i = 0; i <= TTM_PL_PRIV5; i++)
-               if (flags & (1 << i)) {
+               if (place->flags & (1 << i)) {
                        *mem_type = i;
                        return 0;
                }
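
[Annotation: the ttm_place structure this hunk starts converting to looks roughly like the sketch below. The real definition lives in TTM's placement header; the field names are taken from their uses later in this diff (place->flags, heap->fpfn, heap->lpfn), while the exact field types are an assumption.]

    struct ttm_place {
            unsigned        fpfn;   /* first acceptable page frame number */
            unsigned        lpfn;   /* one past the last acceptable pfn; 0 = no upper bound */
            uint32_t        flags;  /* TTM_PL_FLAG_* memory-type and caching flags */
    };
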
@@ -89,12 +90,12 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
               bo, bo->mem.num_pages, bo->mem.size >> 10,
               bo->mem.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
-               ret = ttm_mem_type_from_flags(placement->placement[i],
+               ret = ttm_mem_type_from_place(&placement->placement[i],
                                                &mem_type);
                if (ret)
                        return;
                pr_err("  placement[%d]=0x%08X (%d)\n",
-                      i, placement->placement[i], mem_type);
+                      i, placement->placement[i].flags, mem_type);
                ttm_mem_type_debug(bo->bdev, mem_type);
        }
 }
@@ -412,7 +413,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
        int ret;
 
        spin_lock(&glob->lru_lock);
-       ret = __ttm_bo_reserve(bo, false, true, false, 0);
+       ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
        spin_lock(&bdev->fence_lock);
        (void) ttm_bo_wait(bo, false, false, true);
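
[Annotation: the 0 -> NULL conversions in this and the following hunks are sparse-style cleanups, not behavior changes: the final parameter of __ttm_bo_reserve() is a pointer, so NULL is the idiomatic null argument. A sketch of the assumed prototype, for reference:]

    /* Assumed prototype from this era's ttm_bo_driver.h; the last
     * argument is a struct ww_acquire_ctx *, hence NULL rather than 0. */
    int __ttm_bo_reserve(struct ttm_buffer_object *bo,
                         bool interruptible, bool no_wait,
                         bool use_ticket, struct ww_acquire_ctx *ticket);
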
@@ -514,7 +515,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                        return ret;
 
                spin_lock(&glob->lru_lock);
-               ret = __ttm_bo_reserve(bo, false, true, false, 0);
+               ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
                /*
                 * We raced, and lost, someone else holds the reservation now,
@@ -577,11 +578,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                        kref_get(&nentry->list_kref);
                }
 
-               ret = __ttm_bo_reserve(entry, false, true, false, 0);
+               ret = __ttm_bo_reserve(entry, false, true, false, NULL);
                if (remove_all && ret) {
                        spin_unlock(&glob->lru_lock);
                        ret = __ttm_bo_reserve(entry, false, false,
-                                              false, 0);
+                                              false, NULL);
                        spin_lock(&glob->lru_lock);
                }
 
@@ -685,8 +686,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        evict_mem.bus.io_reserved_vm = false;
        evict_mem.bus.io_reserved_count = 0;
 
-       placement.fpfn = 0;
-       placement.lpfn = 0;
        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
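
[Annotation: the deleted zeroing is possible because fpfn/lpfn no longer exist on struct ttm_placement; each range now travels with its ttm_place entry. After this change the placement container is reduced to roughly the following sketch (field order assumed):]

    struct ttm_placement {
            unsigned                num_placement;
            const struct ttm_place  *placement;
            unsigned                num_busy_placement;
            const struct ttm_place  *busy_placement;
    };
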
@@ -726,7 +725,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo, &man->lru, lru) {
-               ret = __ttm_bo_reserve(bo, false, true, false, 0);
+               ret = __ttm_bo_reserve(bo, false, true, false, NULL);
                if (!ret)
                        break;
        }
@@ -774,7 +773,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        uint32_t mem_type,
-                                       struct ttm_placement *placement,
+                                       const struct ttm_place *place,
                                        struct ttm_mem_reg *mem,
                                        bool interruptible,
                                        bool no_wait_gpu)
@@ -784,7 +783,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
        int ret;
 
        do {
-               ret = (*man->func->get_node)(man, bo, placement, mem);
+               ret = (*man->func->get_node)(man, bo, place, mem);
                if (unlikely(ret != 0))
                        return ret;
                if (mem->mm_node)
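
[Annotation: these hunks also change the per-memory-type manager callback. As visible from the call sites above, get_node now receives the single ttm_place being tried instead of the whole ttm_placement, so range limits can be honored per entry. The member in struct ttm_mem_type_manager_func becomes:]

    /* Signature inferred from the converted call sites in this diff;
     * every backend implementation must be updated to match. */
    int (*get_node)(struct ttm_mem_type_manager *man,
                    struct ttm_buffer_object *bo,
                    const struct ttm_place *place,
                    struct ttm_mem_reg *mem);
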
@@ -827,18 +826,18 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 uint32_t mem_type,
-                                uint32_t proposed_placement,
+                                const struct ttm_place *place,
                                 uint32_t *masked_placement)
 {
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
-       if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+       if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
                return false;
 
-       if ((proposed_placement & man->available_caching) == 0)
+       if ((place->flags & man->available_caching) == 0)
                return false;
 
-       cur_flags |= (proposed_placement & man->available_caching);
+       cur_flags |= (place->flags & man->available_caching);
 
        *masked_placement = cur_flags;
        return true;
@@ -869,15 +868,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
-               ret = ttm_mem_type_from_flags(placement->placement[i],
-                                               &mem_type);
+               const struct ttm_place *place = &placement->placement[i];
+
+               ret = ttm_mem_type_from_place(place, &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
 
-               type_ok = ttm_bo_mt_compatible(man,
-                                               mem_type,
-                                               placement->placement[i],
+               type_ok = ttm_bo_mt_compatible(man, mem_type, place,
                                                &cur_flags);
 
                if (!type_ok)
@@ -889,7 +887,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                 * Use the access and other non-mapping-related flag bits from
                 * the memory placement flags to the current flags
                 */
-               ttm_flag_masked(&cur_flags, placement->placement[i],
+               ttm_flag_masked(&cur_flags, place->flags,
                                ~TTM_PL_MASK_MEMTYPE);
 
                if (mem_type == TTM_PL_SYSTEM)
@@ -897,7 +895,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
                if (man->has_type && man->use_type) {
                        type_found = true;
-                       ret = (*man->func->get_node)(man, bo, placement, mem);
+                       ret = (*man->func->get_node)(man, bo, place, mem);
                        if (unlikely(ret))
                                return ret;
                }
@@ -915,17 +913,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                return -EINVAL;
 
        for (i = 0; i < placement->num_busy_placement; ++i) {
-               ret = ttm_mem_type_from_flags(placement->busy_placement[i],
-                                               &mem_type);
+               const struct ttm_place *place = &placement->busy_placement[i];
+
+               ret = ttm_mem_type_from_place(place, &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type)
                        continue;
-               if (!ttm_bo_mt_compatible(man,
-                                               mem_type,
-                                               placement->busy_placement[i],
-                                               &cur_flags))
+               if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
                        continue;
 
                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
@@ -934,10 +930,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                 * Use the access and other non-mapping-related flag bits from
                 * the memory placement flags to the current flags
                 */
-               ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+               ttm_flag_masked(&cur_flags, place->flags,
                                ~TTM_PL_MASK_MEMTYPE);
 
-
                if (mem_type == TTM_PL_SYSTEM) {
                        mem->mem_type = mem_type;
                        mem->placement = cur_flags;
@@ -945,7 +940,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        return 0;
                }
 
-               ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+               ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
                                                interruptible, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
@@ -1006,20 +1001,27 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
 {
        int i;
 
-       if (mem->mm_node && placement->lpfn != 0 &&
-           (mem->start < placement->fpfn ||
-            mem->start + mem->num_pages > placement->lpfn))
-               return false;
-
        for (i = 0; i < placement->num_placement; i++) {
-               *new_flags = placement->placement[i];
+               const struct ttm_place *heap = &placement->placement[i];
+               if (mem->mm_node && heap->lpfn != 0 &&
+                   (mem->start < heap->fpfn ||
+                    mem->start + mem->num_pages > heap->lpfn))
+                       continue;
+
+               *new_flags = heap->flags;
                if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
                    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
                        return true;
        }
 
        for (i = 0; i < placement->num_busy_placement; i++) {
-               *new_flags = placement->busy_placement[i];
+               const struct ttm_place *heap = &placement->busy_placement[i];
+               if (mem->mm_node && heap->lpfn != 0 &&
+                   (mem->start < heap->fpfn ||
+                    mem->start + mem->num_pages > heap->lpfn))
+                       continue;
+
+               *new_flags = heap->flags;
                if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
                    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
                        return true;
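
[Annotation: the same fpfn/lpfn window test is duplicated in both loops above. An equivalent standalone helper, shown here only to spell the logic out; the name is hypothetical and it is not part of the patch:]

    /* Illustrative only: true when mem already sits inside the window a
     * ttm_place demands. lpfn == 0 means "no upper bound", and entries
     * without an allocated mm_node are never range-restricted, matching
     * the checks added above. */
    static bool ttm_place_range_ok(const struct ttm_place *heap,
                                   const struct ttm_mem_reg *mem)
    {
            if (!mem->mm_node || heap->lpfn == 0)
                    return true;
            return mem->start >= heap->fpfn &&
                   mem->start + mem->num_pages <= heap->lpfn;
    }
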
@@ -1037,11 +1039,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
        uint32_t new_flags;
 
        lockdep_assert_held(&bo->resv->lock.base);
-       /* Check that range is valid */
-       if (placement->lpfn || placement->fpfn)
-               if (placement->fpfn > placement->lpfn ||
-                       (placement->lpfn - placement->fpfn) < bo->num_pages)
-                       return -EINVAL;
        /*
         * Check whether we need to move buffer.
         */
@@ -1070,15 +1067,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_validate);
 
-int ttm_bo_check_placement(struct ttm_buffer_object *bo,
-                               struct ttm_placement *placement)
-{
-       BUG_ON((placement->fpfn || placement->lpfn) &&
-              (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
-
-       return 0;
-}
-
 int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
@@ -1147,15 +1135,12 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        atomic_inc(&bo->glob->bo_count);
        drm_vma_node_reset(&bo->vma_node);
 
-       ret = ttm_bo_check_placement(bo, placement);
-
        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
-       if (likely(!ret) &&
-           (bo->type == ttm_bo_type_device ||
-            bo->type == ttm_bo_type_sg))
+       if (bo->type == ttm_bo_type_device ||
+           bo->type == ttm_bo_type_sg)
                ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
                                         bo->mem.num_pages);
 
@@ -1595,7 +1580,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
         * Using ttm_bo_reserve makes sure the lru lists are updated.
         */
 
-       ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+       ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
        if (unlikely(ret != 0))
                return ret;
        spin_lock(&bdev->fence_lock);
@@ -1630,7 +1615,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo, &glob->swap_lru, swap) {
-               ret = __ttm_bo_reserve(bo, false, true, false, 0);
+               ret = __ttm_bo_reserve(bo, false, true, false, NULL);
                if (!ret)
                        break;
        }