/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

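/*
 * Overview (editorial sketch, not in the original file): amdgpu_sync
 * collects the fences a job has to wait on. Fences are kept in a hash
 * table keyed by fence context, so only the latest fence of each context
 * is remembered. An illustrative call sequence, assuming a reservation
 * object "resv" that the caller holds locked:
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv, owner);
 *	...
 *	f = amdgpu_sync_get_fence(&sync);
 *	...
 *	amdgpu_sync_free(&sync);
 */
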
struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct fence		*fence;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

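		/* Each amdgpu ring embeds its GPU scheduler, so the
		 * scheduler pointer leads back to the ring and from
		 * there to the owning device.
		 */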
		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one
 * is later.
 */
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
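	/* Keep the existing fence if it is already the later one;
	 * otherwise drop our reference and take one on the new fence.
	 */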
	if (*keep && fence_is_later(*keep, fence))
		return;

	fence_put(*keep);
	*keep = fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an
 * entry was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
{
	struct amdgpu_sync_entry *e;

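	/* Walk the hash bucket for this fence context; different
	 * contexts can land in the same bucket, so recheck the context
	 * before replacing an entry.
	 */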
	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

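	/* Track the latest VM page table update issued by this device. */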
	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	if (amdgpu_sync_add_later(sync, f))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = fence_get(f);
	return 0;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from the reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner used to decide which fences to skip
 *
 * Sync to all fences in the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
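			/* Skip when both owners are defined and exactly
			 * one of the two is a VM update.
			 */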
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence_owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns the next fence not signaled yet without removing it from the
 * sync object.
 */
struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
				     struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct fence *f = e->fence;
		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

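		/* Drop entries whose fence has already signaled. */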
		if (fence_is_signaled(f)) {
			hash_del(&e->node);
			fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}

		return f;
	}

	return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Gets and removes the next not yet signaled fence from the sync object.
 */
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {

		f = e->fence;

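		/* The entry is removed either way; an unsignaled fence
		 * is handed to the caller together with its reference.
		 */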
		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!fence_is_signaled(f))
			return f;

		fence_put(f);
	}
	return NULL;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}