/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct fence		*fence;
};
/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		sync->semaphores[i] = NULL;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		sync->sync_to[i] = NULL;

	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
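/*
 * Illustrative lifecycle sketch; the locals shown (adev, resv, owner,
 * ring, fence) are assumptions about a typical caller, not code from
 * this file:
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv, owner);
 *	if (!r)
 *		r = amdgpu_sync_rings(&sync, ring);
 *	amdgpu_sync_free(adev, &sync, fence);
 */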
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (a_fence)
		return a_fence->ring->adev == adev;
	if (s_fence)
		return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
	return false;
}
static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner == owner;
	if (a_fence)
		return a_fence->owner == owner;
	return false;
}
/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device the fence belongs to
 * @sync: sync object to add fence to
 * @fence: fence to sync to
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_sync_entry *e;
	struct amdgpu_fence *fence;
	struct amdgpu_fence *other;
	struct fence *tmp, *later;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) {
		if (sync->last_vm_update) {
			tmp = sync->last_vm_update;
			BUG_ON(f->context != tmp->context);
			later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp;
			sync->last_vm_update = fence_get(later);
			fence_put(tmp);
		} else
			sync->last_vm_update = fence_get(f);
	}

	fence = to_amdgpu_fence(f);
	if (!fence || fence->ring->adev != adev) {
		hash_for_each_possible(sync->fences, e, node, f->context) {
			struct fence *new;

			if (unlikely(e->fence->context != f->context))
				continue;

			new = fence_get(fence_later(e->fence, f));
			if (new) {
				fence_put(e->fence);
				e->fence = new;
			}
			return 0;
		}

		e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		hash_add(sync->fences, &e->node, f->context);
		e->fence = fence_get(f);
		return 0;
	}

	other = sync->sync_to[fence->ring->idx];
	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
		amdgpu_fence_later(fence, other));
	amdgpu_fence_unref(&other);

	return 0;
}
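/*
 * Note on the "later" test in amdgpu_sync_fence(): seqno is an unsigned
 * sequence number that may wrap around, so f->seqno - tmp->seqno <= INT_MAX
 * picks f as the newer fence whenever it is at most INT_MAX steps ahead in
 * modular arithmetic. For example, with 32-bit seqnos, f->seqno == 2 and
 * tmp->seqno == 0xfffffffe differ by 4, so f is correctly treated as later
 * even though it is numerically smaller.
 */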
static void *amdgpu_sync_get_owner(struct fence *f)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner;
	if (a_fence)
		return a_fence->owner;
	return AMDGPU_FENCE_OWNER_UNDEFINED;
}
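/*
 * The owner is an opaque void * tag on a fence: either one of the special
 * AMDGPU_FENCE_OWNER_* markers (UNDEFINED, VM, MOVE) or, typically, a
 * pointer identifying the submitter. It exists so amdgpu_sync_resv() below
 * can decide which fences a submission actually has to wait for.
 */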
/**
 * amdgpu_sync_resv - use the semaphores to sync to a reservation object
 *
 * @adev: amdgpu device the sync object belongs to
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner whose fences should be filtered out
 *
 * Sync to the fences in the reservation object using the semaphore
 * objects.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_MOVE) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence_owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}
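/*
 * Illustrative call; bo_resv is an assumed local, not taken from this
 * file. A submission that must respect every foreign fence on a buffer
 * can pass the undefined owner:
 *
 *	r = amdgpu_sync_resv(adev, &sync, bo_resv, AMDGPU_FENCE_OWNER_UNDEFINED);
 *
 * With AMDGPU_FENCE_OWNER_UNDEFINED the same-owner shortcut never matches,
 * so only VM-update fences (interesting solely to other VM updates and
 * moves) are filtered out.
 */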
/**
 * amdgpu_sync_get_fence - get the next unsignaled fence
 *
 * @sync: sync object to consume fences from
 *
 * Removes and returns the next fence from the sync object that is not
 * signaled yet, or NULL when none are left.
 */
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;

		hash_del(&e->node);
		kfree(e);

		if (!fence_is_signaled(f))
			return f;

		fence_put(f);
	}
	return NULL;
}
/**
 * amdgpu_sync_wait - block until all remembered fences are signaled
 *
 * @sync: sync object to wait on
 */
int amdgpu_sync_wait(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = fence_wait(e->fence, false);
		if (r)
			return r;

		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}

	if (amdgpu_enable_semaphores)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_fence *fence = sync->sync_to[i];

		if (!fence)
			continue;

		r = fence_wait(&fence->base, false);
		if (r)
			return r;
	}

	return 0;
}
/**
 * amdgpu_sync_rings - sync ring to all registered fences
 *
 * @sync: sync object to use
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 */
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned count = 0;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_fence *fence = sync->sync_to[i];
		struct amdgpu_semaphore *semaphore;
		struct amdgpu_ring *other = adev->rings[i];

		/* check if we really need to sync */
		if (!amdgpu_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!other->ready) {
			dev_err(adev->dev, "Syncing to a disabled ring!");
			return -EINVAL;
		}

		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
		    (count >= AMDGPU_NUM_SYNCS)) {
			/* not enough room, wait manually */
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		r = amdgpu_semaphore_create(adev, &semaphore);
		if (r)
			return r;

		sync->semaphores[count++] = semaphore;

		/* allocate enough space for sync command */
		r = amdgpu_ring_alloc(other, 16);
		if (r)
			return r;

		/* emit the signal semaphore */
		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
			/* signaling wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		/* we assume caller has already allocated space on waiters ring */
		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
			/* waiting wasn't successful, wait manually */
			amdgpu_ring_undo(other);
			r = fence_wait(&fence->base, false);
			if (r)
				return r;
			continue;
		}

		amdgpu_ring_commit(other);
		amdgpu_fence_note_sync(fence, ring);
	}

	return 0;
}
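/*
 * The signal/wait pair above forms a GPU-side handshake: the ring that
 * owns the fence ("other") signals the semaphore and the submitting
 * "ring" waits on it, avoiding a CPU stall. Every failure path falls
 * back to fence_wait(), the CPU-side equivalent, so correctness never
 * depends on semaphores being available.
 */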
/**
 * amdgpu_sync_free - free the sync object
 *
 * @adev: amdgpu_device pointer
 * @sync: sync object to use
 * @fence: fence to use for the free
 *
 * Free the sync object by freeing all semaphores in it.
 */
void amdgpu_sync_free(struct amdgpu_device *adev,
		      struct amdgpu_sync *sync,
		      struct fence *fence)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		fence_put(e->fence);
		kfree(e);
	}

	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		amdgpu_fence_unref(&sync->sync_to[i]);

	fence_put(sync->last_vm_update);
}