/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0

/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		n -= rdev->ring[i].ring_size;
	if (rdev->wb.wb_obj)
		n -= RADEON_GPU_PAGE_SIZE;
	if (rdev->ih.ring_obj)
		n -= rdev->ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct radeon_fence *fence = NULL;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

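		/* Copy GTT -> VRAM on the engine under test and wait for
		 * completion before checking the result.
		 */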
		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);
140 | ||
4c788679 | 141 | r = radeon_bo_kmap(vram_obj, &vram_map); |
ecc0b326 MD |
142 | if (r) { |
143 | DRM_ERROR("Failed to map VRAM object after copy %d\n", i); | |
977c38d5 | 144 | goto out_lclean_unpin; |
ecc0b326 MD |
145 | } |
146 | ||
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

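		/* Copy back VRAM -> GTT on the same engine for the return
		 * trip and wait again.
		 */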
		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);
186 | ||
4c788679 | 187 | r = radeon_bo_kmap(gtt_obj[i], >t_map); |
ecc0b326 MD |
188 | if (r) { |
189 | DRM_ERROR("Failed to map GTT object after copy %d\n", i); | |
977c38d5 | 190 | goto out_lclean_unpin; |
ecc0b326 MD |
191 | } |
192 | ||
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
		continue;

out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence)
			radeon_fence_unref(&fence);
		break;
	}
235 | ||
977c38d5 ML |
236 | radeon_bo_unpin(vram_obj); |
237 | out_unres: | |
238 | radeon_bo_unreserve(vram_obj); | |
239 | out_unref: | |
240 | radeon_bo_unref(&vram_obj); | |
ecc0b326 | 241 | out_cleanup: |
977c38d5 | 242 | kfree(gtt_obj); |
ecc0b326 MD |
243 | if (r) { |
244 | printk(KERN_WARNING "Error while testing BO move.\n"); | |
245 | } | |
246 | } | |
60a7e396 | 247 | |
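/* Run the BO move test once per copy engine the ASIC provides. */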
void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}
	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		radeon_fence_emit(rdev, fence, ring->idx);
		radeon_ring_unlock_commit(rdev, ring);
	}
	return 0;
}

void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}
299 | ||
e32eb50d | 300 | r = radeon_ring_lock(rdev, ringA, 64); |
60a7e396 | 301 | if (r) { |
8b25ed34 | 302 | DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); |
60a7e396 CK |
303 | goto out_cleanup; |
304 | } | |
8b25ed34 | 305 | radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); |
f2ba57b5 CK |
306 | radeon_ring_unlock_commit(rdev, ringA); |
307 | ||
308 | r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1); | |
309 | if (r) | |
876dc9f3 | 310 | goto out_cleanup; |
f2ba57b5 CK |
311 | |
312 | r = radeon_ring_lock(rdev, ringA, 64); | |
876dc9f3 | 313 | if (r) { |
f2ba57b5 | 314 | DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); |
876dc9f3 CK |
315 | goto out_cleanup; |
316 | } | |
f2ba57b5 | 317 | radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); |
e32eb50d | 318 | radeon_ring_unlock_commit(rdev, ringA); |
60a7e396 | 319 | |
f2ba57b5 CK |
320 | r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2); |
321 | if (r) | |
322 | goto out_cleanup; | |
323 | ||
	mdelay(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

static void radeon_test_ring_sync2(struct radeon_device *rdev,
				   struct radeon_ring *ringA,
				   struct radeon_ring *ringB,
				   struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC);

	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fence A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC);

	mdelay(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}
475 | ||
476 | out_cleanup: | |
220907d9 | 477 | radeon_semaphore_free(rdev, &semaphore, NULL); |
ce954884 CK |
478 | |
479 | if (fenceA) | |
480 | radeon_fence_unref(&fenceA); | |
481 | ||
482 | if (fenceB) | |
483 | radeon_fence_unref(&fenceB); | |
60a7e396 CK |
484 | |
485 | if (r) | |
486 | printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); | |
487 | } | |
488 | ||
void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}