/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
void radeon_test_moves(struct radeon_device *rdev)
{
        struct radeon_bo *vram_obj = NULL;
        struct radeon_bo **gtt_obj = NULL;
        struct radeon_fence *fence = NULL;
        uint64_t gtt_addr, vram_addr;
        unsigned i, n, size;
        int r;

        size = 1024 * 1024;

        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
        n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                n -= rdev->ring[i].ring_size;
        if (rdev->wb.wb_obj)
                n -= RADEON_GPU_PAGE_SIZE;
        if (rdev->ih.ring_obj)
                n -= rdev->ih.ring_size;
        n /= size;

        gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
        if (!gtt_obj) {
                DRM_ERROR("Failed to allocate %d pointers\n", n);
                r = 1;
                goto out_cleanup;
        }

        r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                             &vram_obj);
        if (r) {
                DRM_ERROR("Failed to create VRAM object\n");
                goto out_cleanup;
        }
        r = radeon_bo_reserve(vram_obj, false);
        if (unlikely(r != 0))
                goto out_cleanup;
        r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
        if (r) {
                DRM_ERROR("Failed to pin VRAM object\n");
                goto out_cleanup;
        }
        for (i = 0; i < n; i++) {
                void *gtt_map, *vram_map;
                void **gtt_start, **gtt_end;
                void **vram_start, **vram_end;

                r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
                if (r) {
                        DRM_ERROR("Failed to create GTT object %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_bo_reserve(gtt_obj[i], false);
                if (unlikely(r != 0))
                        goto out_cleanup;
                r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
                if (r) {
                        DRM_ERROR("Failed to pin GTT object %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object %d\n", i);
                        goto out_cleanup;
                }

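                /* Fill the GTT BO with the CPU addresses of its own
                 * mapping, one pointer-sized word at a time, so every
                 * location holds a unique value that the copy checks
                 * below can verify.
                 */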
                for (gtt_start = gtt_map, gtt_end = gtt_map + size;
                     gtt_start < gtt_end;
                     gtt_start++)
                        *gtt_start = gtt_start;

                radeon_bo_kunmap(gtt_obj[i]);

                r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
                if (r) {
                        DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
                        goto out_cleanup;
                }

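                /* radeon_copy() takes the transfer size in GPU pages,
                 * hence size / RADEON_GPU_PAGE_SIZE; the fence signals
                 * once the blit has completed.
                 */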
                r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
                if (r) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
                        goto out_cleanup;
                }

                radeon_fence_unref(&fence);

                r = radeon_bo_kmap(vram_obj, &vram_map);
                if (r) {
                        DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
                        goto out_cleanup;
                }

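                /* Check the copied pattern, then overwrite VRAM with its
                 * own map addresses to seed the VRAM->GTT leg of the test.
                 */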
                for (gtt_start = gtt_map, gtt_end = gtt_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
                     vram_start < vram_end;
                     gtt_start++, vram_start++) {
                        if (*vram_start != gtt_start) {
                                DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
                                          "expected 0x%p (GTT/VRAM offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *vram_start, gtt_start,
                                          (unsigned long long)
                                          (gtt_addr - rdev->mc.gtt_start +
                                           (void*)gtt_start - gtt_map),
                                          (unsigned long long)
                                          (vram_addr - rdev->mc.vram_start +
                                           (void*)gtt_start - gtt_map));
                                radeon_bo_kunmap(vram_obj);
                                goto out_cleanup;
                        }
                        *vram_start = vram_start;
                }

                radeon_bo_kunmap(vram_obj);

                r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
                if (r) {
                        DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
                if (r) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
                        goto out_cleanup;
                }

                radeon_fence_unref(&fence);

                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object after copy %d\n", i);
                        goto out_cleanup;
                }

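                /* Verify the reverse direction: the GTT BO should now
                 * hold the VRAM map addresses written above.
                 */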
                for (gtt_start = gtt_map, gtt_end = gtt_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
                     gtt_start < gtt_end;
                     gtt_start++, vram_start++) {
                        if (*gtt_start != vram_start) {
                                DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
                                          "expected 0x%p (VRAM/GTT offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *gtt_start, vram_start,
                                          (unsigned long long)
                                          (vram_addr - rdev->mc.vram_start +
                                           (void*)vram_start - vram_map),
                                          (unsigned long long)
                                          (gtt_addr - rdev->mc.gtt_start +
                                           (void*)vram_start - vram_map));
                                radeon_bo_kunmap(gtt_obj[i]);
                                goto out_cleanup;
                        }
                }

                radeon_bo_kunmap(gtt_obj[i]);

                DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
                         gtt_addr - rdev->mc.gtt_start);
        }

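        /* Unwind in reverse order: unpin and unreserve any BO that is
         * still reserved, then drop all references.
         */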
out_cleanup:
        if (vram_obj) {
                if (radeon_bo_is_reserved(vram_obj)) {
                        radeon_bo_unpin(vram_obj);
                        radeon_bo_unreserve(vram_obj);
                }
                radeon_bo_unref(&vram_obj);
        }
        if (gtt_obj) {
                for (i = 0; i < n; i++) {
                        if (gtt_obj[i]) {
                                if (radeon_bo_is_reserved(gtt_obj[i])) {
                                        radeon_bo_unpin(gtt_obj[i]);
                                        radeon_bo_unreserve(gtt_obj[i]);
                                }
                                radeon_bo_unref(&gtt_obj[i]);
                        }
                }
                kfree(gtt_obj);
        }
        if (fence) {
                radeon_fence_unref(&fence);
        }
        if (r) {
                printk(KERN_WARNING "Error while testing BO move.\n");
        }
}

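/* Test semaphore synchronization between two rings: ring A queues two
 * fences, each behind a wait on the same semaphore, and ring B releases
 * them one at a time by signaling that semaphore.
 */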
void radeon_test_ring_sync(struct radeon_device *rdev,
                           struct radeon_ring *ringA,
                           struct radeon_ring *ringB)
{
        struct radeon_fence *fence1 = NULL, *fence2 = NULL;
        struct radeon_semaphore *semaphore = NULL;
        int ridxA = radeon_ring_index(rdev, ringA);
        int ridxB = radeon_ring_index(rdev, ringB);
        int r;

        r = radeon_fence_create(rdev, &fence1, ridxA);
        if (r) {
                DRM_ERROR("Failed to create sync fence 1\n");
                goto out_cleanup;
        }
        r = radeon_fence_create(rdev, &fence2, ridxA);
        if (r) {
                DRM_ERROR("Failed to create sync fence 2\n");
                goto out_cleanup;
        }

        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

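        /* Emit both fences on ring A, each preceded by a semaphore wait,
         * so neither can signal until ring B signals the semaphore.
         */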
        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ridxA);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
        radeon_fence_emit(rdev, fence1);
        radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
        radeon_fence_emit(rdev, fence2);
        radeon_ring_unlock_commit(rdev, ringA);

        mdelay(1000);

        if (radeon_fence_signaled(fence1)) {
                DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %p\n", ringB);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
        radeon_ring_unlock_commit(rdev, ringB);

        r = radeon_fence_wait(fence1, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence 1\n");
                goto out_cleanup;
        }

        mdelay(1000);

        if (radeon_fence_signaled(fence2)) {
                DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %p\n", ringB);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
        radeon_ring_unlock_commit(rdev, ringB);

        r = radeon_fence_wait(fence2, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence 2\n");
                goto out_cleanup;
        }

out_cleanup:
        if (semaphore)
                radeon_semaphore_free(rdev, semaphore, NULL);

        if (fence1)
                radeon_fence_unref(&fence1);

        if (fence2)
                radeon_fence_unref(&fence2);

        if (r)
                printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

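/* Test one semaphore with two waiters: rings A and B each block on the
 * same semaphore, and ring C signals it twice. Each signal should release
 * exactly one of the two waiting rings.
 */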
void radeon_test_ring_sync2(struct radeon_device *rdev,
                            struct radeon_ring *ringA,
                            struct radeon_ring *ringB,
                            struct radeon_ring *ringC)
{
        struct radeon_fence *fenceA = NULL, *fenceB = NULL;
        struct radeon_semaphore *semaphore = NULL;
        int ridxA = radeon_ring_index(rdev, ringA);
        int ridxB = radeon_ring_index(rdev, ringB);
        int ridxC = radeon_ring_index(rdev, ringC);
        bool sigA, sigB;
        int i, r;

        r = radeon_fence_create(rdev, &fenceA, ridxA);
        if (r) {
                DRM_ERROR("Failed to create sync fence A\n");
                goto out_cleanup;
        }
        r = radeon_fence_create(rdev, &fenceB, ridxB);
        if (r) {
                DRM_ERROR("Failed to create sync fence B\n");
                goto out_cleanup;
        }

        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

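        /* Block ring A and ring B on the same semaphore, with a fence
         * queued behind each wait.
         */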
        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ridxA);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
        radeon_fence_emit(rdev, fenceA);
        radeon_ring_unlock_commit(rdev, ringA);

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %d\n", ridxB);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
        radeon_fence_emit(rdev, fenceB);
        radeon_ring_unlock_commit(rdev, ringB);

        mdelay(1000);

        if (radeon_fence_signaled(fenceA)) {
                DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }
        if (radeon_fence_signaled(fenceB)) {
                DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

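        /* A single signal from ring C should release exactly one of the
         * two waiters; the second signal, emitted further down, releases
         * the other.
         */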
        r = radeon_ring_lock(rdev, ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %p\n", ringC);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
        radeon_ring_unlock_commit(rdev, ringC);

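        /* Poll for up to three seconds; exactly one of the two fences
         * should have signaled by then.
         */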
        for (i = 0; i < 30; ++i) {
                mdelay(100);
                sigA = radeon_fence_signaled(fenceA);
                sigB = radeon_fence_signaled(fenceB);
                if (sigA || sigB)
                        break;
        }

        if (!sigA && !sigB) {
                DRM_ERROR("Neither fence A nor B has been signaled\n");
                goto out_cleanup;
        } else if (sigA && sigB) {
                DRM_ERROR("Both fence A and B have been signaled\n");
                goto out_cleanup;
        }

        DRM_INFO("Fence %c was signaled first\n", sigA ? 'A' : 'B');

        r = radeon_ring_lock(rdev, ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %p\n", ringC);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
        radeon_ring_unlock_commit(rdev, ringC);

        mdelay(1000);

        r = radeon_fence_wait(fenceA, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence A\n");
                goto out_cleanup;
        }
        r = radeon_fence_wait(fenceB, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence B\n");
                goto out_cleanup;
        }

out_cleanup:
        if (semaphore)
                radeon_semaphore_free(rdev, semaphore, NULL);

        if (fenceA)
                radeon_fence_unref(&fenceA);

        if (fenceB)
                radeon_fence_unref(&fenceB);

        if (r)
                printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

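/* Run the semaphore tests on every combination of rings that are ready:
 * both directions for each pair, and every ordering for each triple.
 */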
void radeon_test_syncing(struct radeon_device *rdev)
{
        int i, j, k;

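        /* i starts at 1 and j < i, so each unordered pair of rings is
         * visited exactly once; both orderings are then tested explicitly.
         */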
        for (i = 1; i < RADEON_NUM_RINGS; ++i) {
                struct radeon_ring *ringA = &rdev->ring[i];
                if (!ringA->ready)
                        continue;

                for (j = 0; j < i; ++j) {
                        struct radeon_ring *ringB = &rdev->ring[j];
                        if (!ringB->ready)
                                continue;

                        DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
                        radeon_test_ring_sync(rdev, ringA, ringB);

                        DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
                        radeon_test_ring_sync(rdev, ringB, ringA);

                        for (k = 0; k < j; ++k) {
                                struct radeon_ring *ringC = &rdev->ring[k];
                                if (!ringC->ready)
                                        continue;

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
                                radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
                                radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
                                radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
                                radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
                                radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
                                radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
                        }
                }
        }
}