drm/amdgpu: use common fences for VMID management v2
author Christian König <christian.koenig@amd.com>
Thu, 22 Oct 2015 09:55:58 +0000 (11:55 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 30 Oct 2015 05:55:12 +0000 (01:55 -0400)
v2: add missing NULL check.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
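
The patch replaces the driver-private amdgpu_fence_ref()/amdgpu_fence_unref()
reference counting with the kernel's common struct fence API of this era
(fence_get()/fence_put(), later renamed dma_fence_*). A minimal sketch of the
ownership pattern the hunks below introduce; vmid_slot_replace() is a
hypothetical helper for illustration, not part of the patch:

    #include <linux/fence.h>

    /* Drop the reference held in *slot and publish a new one. Both
     * fence_put() and fence_get() accept NULL, so an empty VMID slot
     * needs no explicit check here. */
    static void vmid_slot_replace(struct fence **slot, struct fence *fence)
    {
            fence_put(*slot);
            *slot = fence_get(fence);
    }

For an amdgpu_fence the common fence is embedded in it, which is why the
hunks below take references on &fence->base rather than on the amdgpu_fence
itself.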

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6d9c929e6d51136c4682c088a50a87d17a8cbfba..371994c4645f60f906907a5f7d97e5c752a1e472 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -970,7 +970,7 @@ struct amdgpu_vm_id {
        /* last flushed PD/PT update */
        struct fence            *flushed_updates;
        /* last use of vmid */
-       struct amdgpu_fence     *last_id_use;
+       struct fence            *last_id_use;
 };
 
 struct amdgpu_vm {
@@ -1003,7 +1003,7 @@ struct amdgpu_vm {
 };
 
 struct amdgpu_vm_manager {
-       struct amdgpu_fence             *active[AMDGPU_NUM_VM];
+       struct fence                    *active[AMDGPU_NUM_VM];
        uint32_t                        max_pfn;
        /* number of VMIDs */
        unsigned                        nvm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 06e207fd007b501e9857726989143d33a49aa370..a12c726f77816621bf040260765c5356918ec0bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -135,7 +135,7 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync)
 {
-       struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
+       struct fence *best[AMDGPU_MAX_RINGS] = {};
        struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
        struct amdgpu_device *adev = ring->adev;
 
@@ -154,7 +154,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
        /* skip over VMID 0, since it is the system VM */
        for (i = 1; i < adev->vm_manager.nvm; ++i) {
-               struct amdgpu_fence *fence = adev->vm_manager.active[i];
+               struct fence *fence = adev->vm_manager.active[i];
+               struct amdgpu_ring *fring;
 
                if (fence == NULL) {
                        /* found a free one */
@@ -163,21 +164,23 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                        return 0;
                }
 
-               if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
-                       best[fence->ring->idx] = fence;
-                       choices[fence->ring == ring ? 0 : 1] = i;
+               fring = amdgpu_ring_from_fence(fence);
+               if (best[fring->idx] == NULL ||
+                   fence_is_later(best[fring->idx], fence)) {
+                       best[fring->idx] = fence;
+                       choices[fring == ring ? 0 : 1] = i;
                }
        }
 
        for (i = 0; i < 2; ++i) {
                if (choices[i]) {
-                       struct amdgpu_fence *fence;
+                       struct fence *fence;
 
                        fence  = adev->vm_manager.active[choices[i]];
                        vm_id->id = choices[i];
 
                        trace_amdgpu_vm_grab_id(choices[i], ring->idx);
-                       return amdgpu_sync_fence(ring->adev, sync, &fence->base);
+                       return amdgpu_sync_fence(ring->adev, sync, fence);
                }
        }
 
@@ -246,11 +249,11 @@ void amdgpu_vm_fence(struct amdgpu_device *adev,
        unsigned ridx = fence->ring->idx;
        unsigned vm_id = vm->ids[ridx].id;
 
-       amdgpu_fence_unref(&adev->vm_manager.active[vm_id]);
-       adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence);
+       fence_put(adev->vm_manager.active[vm_id]);
+       adev->vm_manager.active[vm_id] = fence_get(&fence->base);
 
-       amdgpu_fence_unref(&vm->ids[ridx].last_id_use);
-       vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence);
+       fence_put(vm->ids[ridx].last_id_use);
+       vm->ids[ridx].last_id_use = fence_get(&fence->base);
 }
 
 /**
@@ -1311,7 +1314,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                fence_put(vm->ids[i].flushed_updates);
-               amdgpu_fence_unref(&vm->ids[i].last_id_use);
+               fence_put(vm->ids[i].last_id_use);
        }
 
        mutex_destroy(&vm->mutex);
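
In amdgpu_vm_grab_id() the driver-specific amdgpu_fence_is_earlier() is
replaced by the common fence_is_later(), which requires the explicit test for
empty slots (most likely the missing NULL check noted in the v2 changelog).
A sketch of the replaced predicate, assuming both fences were emitted on the
same ring and therefore share a fence context; candidate_is_older() is a
hypothetical name for illustration only:

    /* Keep the candidate as the new least-recently-used fence when the
     * slot is empty or the current best would signal after it.
     * fence_is_later(f1, f2) is only meaningful when f1 and f2 share
     * a context, i.e. came from the same ring. */
    static bool candidate_is_older(struct fence *best, struct fence *fence)
    {
            return best == NULL || fence_is_later(best, fence);
    }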
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 488348272c4d1a0e5a9d93d80277abdf5407039e..85bbcdc73fff0198d350ac1468a8d552b3a450c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -965,7 +965,7 @@ static int gmc_v7_0_sw_fini(void *handle)
 
        if (adev->vm_manager.enabled) {
                for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       amdgpu_fence_unref(&adev->vm_manager.active[i]);
+                       fence_put(adev->vm_manager.active[i]);
                gmc_v7_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
@@ -1015,7 +1015,7 @@ static int gmc_v7_0_suspend(void *handle)
 
        if (adev->vm_manager.enabled) {
                for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       amdgpu_fence_unref(&adev->vm_manager.active[i]);
+                       fence_put(adev->vm_manager.active[i]);
                gmc_v7_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 72e977b1685db6a5c70c0e6984e4ee8a9877ac48..1bcc4e74e3b485d694d9b91417b76998868b0e6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -984,7 +984,7 @@ static int gmc_v8_0_sw_fini(void *handle)
 
        if (adev->vm_manager.enabled) {
                for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       amdgpu_fence_unref(&adev->vm_manager.active[i]);
+                       fence_put(adev->vm_manager.active[i]);
                gmc_v8_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
@@ -1036,7 +1036,7 @@ static int gmc_v8_0_suspend(void *handle)
 
        if (adev->vm_manager.enabled) {
                for (i = 0; i < AMDGPU_NUM_VM; ++i)
-                       amdgpu_fence_unref(&adev->vm_manager.active[i]);
+                       fence_put(adev->vm_manager.active[i]);
                gmc_v8_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
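
The four gmc_v7_0/gmc_v8_0 teardown paths above repeat the same loop. A
hypothetical consolidation, not part of the patch, showing why no per-slot
NULL check is needed:

    /* Release every active VMID fence; fence_put() ignores NULL
     * entries, so unused slots are harmless. */
    static void vm_manager_release_active(struct amdgpu_device *adev)
    {
            unsigned i;

            for (i = 0; i < AMDGPU_NUM_VM; ++i) {
                    fence_put(adev->vm_manager.active[i]);
                    adev->vm_manager.active[i] = NULL;
            }
    }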