/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
void radeon_test_moves(struct radeon_device *rdev)
{
        struct radeon_bo *vram_obj = NULL;
        struct radeon_bo **gtt_obj = NULL;
        struct radeon_fence *fence = NULL;
        uint64_t gtt_addr, vram_addr;
        unsigned i, n, size;
        int r;

        size = 1024 * 1024;

        /* Number of tests =
         * (Total GTT - IB pool - writeback page - ring buffers) / test size
         */
        n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                n -= rdev->ring[i].ring_size;
        if (rdev->wb.wb_obj)
                n -= RADEON_GPU_PAGE_SIZE;
        if (rdev->ih.ring_obj)
                n -= rdev->ih.ring_size;
        n /= size;

        gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
        if (!gtt_obj) {
                DRM_ERROR("Failed to allocate %d pointers\n", n);
                r = 1;
                goto out_cleanup;
        }

        r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                             NULL, &vram_obj);
        if (r) {
                DRM_ERROR("Failed to create VRAM object\n");
                goto out_cleanup;
        }
        r = radeon_bo_reserve(vram_obj, false);
        if (unlikely(r != 0))
                goto out_cleanup;
        r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
        if (r) {
                DRM_ERROR("Failed to pin VRAM object\n");
                goto out_cleanup;
        }
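        /*
         * Create one GTT BO per test slot; each BO stays pinned until cleanup,
         * so successive copies walk across the whole GTT aperture.
         */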
        for (i = 0; i < n; i++) {
                void *gtt_map, *vram_map;
                void **gtt_start, **gtt_end;
                void **vram_start, **vram_end;

                r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
                if (r) {
                        DRM_ERROR("Failed to create GTT object %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_bo_reserve(gtt_obj[i], false);
                if (unlikely(r != 0))
                        goto out_cleanup;
                r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
                if (r) {
                        DRM_ERROR("Failed to pin GTT object %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object %d\n", i);
                        goto out_cleanup;
                }

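                /* Fill the GTT BO with a pattern: each pointer-sized slot
                 * stores the address of its own mapping.
                 */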
                for (gtt_start = gtt_map, gtt_end = gtt_map + size;
                     gtt_start < gtt_end;
                     gtt_start++)
                        *gtt_start = gtt_start;

                radeon_bo_kunmap(gtt_obj[i]);

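                /* Blit the buffer to VRAM with the GPU copy engine and wait
                 * for the copy fence to signal.
                 */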
                r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
                        goto out_cleanup;
                }

                radeon_fence_unref(&fence);

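                /* Map the VRAM copy and check it against the GTT pattern;
                 * while at it, overwrite each slot with its VRAM mapping
                 * address so the copy back can be verified as well.
                 */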
                r = radeon_bo_kmap(vram_obj, &vram_map);
                if (r) {
                        DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
                        goto out_cleanup;
                }

                for (gtt_start = gtt_map, gtt_end = gtt_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
                     vram_start < vram_end;
                     gtt_start++, vram_start++) {
                        if (*vram_start != gtt_start) {
                                DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
                                          "expected 0x%p (GTT/VRAM offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *vram_start, gtt_start,
                                          (unsigned long long)
                                          (gtt_addr - rdev->mc.gtt_start +
                                           (void*)gtt_start - gtt_map),
                                          (unsigned long long)
                                          (vram_addr - rdev->mc.vram_start +
                                           (void*)gtt_start - gtt_map));
                                radeon_bo_kunmap(vram_obj);
                                goto out_cleanup;
                        }
                        *vram_start = vram_start;
                }

                radeon_bo_kunmap(vram_obj);

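                /* Copy back from VRAM to GTT and verify that the GTT BO now
                 * holds the VRAM mapping addresses written above.
                 */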
                r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        goto out_cleanup;
                }

                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
                        goto out_cleanup;
                }

                radeon_fence_unref(&fence);

                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object after copy %d\n", i);
                        goto out_cleanup;
                }

                for (gtt_start = gtt_map, gtt_end = gtt_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
                     gtt_start < gtt_end;
                     gtt_start++, vram_start++) {
                        if (*gtt_start != vram_start) {
                                DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
                                          "expected 0x%p (VRAM/GTT offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *gtt_start, vram_start,
                                          (unsigned long long)
                                          (vram_addr - rdev->mc.vram_start +
                                           (void*)vram_start - vram_map),
                                          (unsigned long long)
                                          (gtt_addr - rdev->mc.gtt_start +
                                           (void*)vram_start - vram_map));
                                radeon_bo_kunmap(gtt_obj[i]);
                                goto out_cleanup;
                        }
                }

                radeon_bo_kunmap(gtt_obj[i]);

                DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
                         gtt_addr - rdev->mc.gtt_start);
        }

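        /* Common exit path: unpin and release every object that was
         * successfully created, on both success and error.
         */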
out_cleanup:
        if (vram_obj) {
                if (radeon_bo_is_reserved(vram_obj)) {
                        radeon_bo_unpin(vram_obj);
                        radeon_bo_unreserve(vram_obj);
                }
                radeon_bo_unref(&vram_obj);
        }
        if (gtt_obj) {
                for (i = 0; i < n; i++) {
                        if (gtt_obj[i]) {
                                if (radeon_bo_is_reserved(gtt_obj[i])) {
                                        radeon_bo_unpin(gtt_obj[i]);
                                        radeon_bo_unreserve(gtt_obj[i]);
                                }
                                radeon_bo_unref(&gtt_obj[i]);
                        }
                }
                kfree(gtt_obj);
        }
        if (fence) {
                radeon_fence_unref(&fence);
        }
        if (r) {
                printk(KERN_WARNING "Error while testing BO move.\n");
        }
}

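/* Test semaphore synchronization between two rings: ring A queues two fences
 * behind two semaphore waits, ring B signals the semaphore twice, and each
 * fence must only signal after the corresponding semaphore signal.
 */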
void radeon_test_ring_sync(struct radeon_device *rdev,
                           struct radeon_ring *ringA,
                           struct radeon_ring *ringB)
{
        struct radeon_fence *fence1 = NULL, *fence2 = NULL;
        struct radeon_semaphore *semaphore = NULL;
        int r;

        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        r = radeon_fence_emit(rdev, &fence1, ringA->idx);
        if (r) {
                DRM_ERROR("Failed to emit fence 1\n");
                radeon_ring_unlock_undo(rdev, ringA);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        r = radeon_fence_emit(rdev, &fence2, ringA->idx);
        if (r) {
                DRM_ERROR("Failed to emit fence 2\n");
                radeon_ring_unlock_undo(rdev, ringA);
                goto out_cleanup;
        }
        radeon_ring_unlock_commit(rdev, ringA);

        mdelay(1000);

        if (radeon_fence_signaled(fence1)) {
                DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %p\n", ringB);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringB);

        r = radeon_fence_wait(fence1, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence 1\n");
                goto out_cleanup;
        }

        mdelay(1000);

        if (radeon_fence_signaled(fence2)) {
                DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %p\n", ringB);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringB);

        r = radeon_fence_wait(fence2, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence 2\n");
                goto out_cleanup;
        }

out_cleanup:
        radeon_semaphore_free(rdev, &semaphore, NULL);

        if (fence1)
                radeon_fence_unref(&fence1);

        if (fence2)
                radeon_fence_unref(&fence2);

        if (r)
                printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

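/* Three-ring variant: ring A and ring B each queue one fence behind a wait on
 * the same semaphore; ring C then signals it twice.  Exactly one of the two
 * fences must signal after the first signal, and the other after the second.
 */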
void radeon_test_ring_sync2(struct radeon_device *rdev,
                            struct radeon_ring *ringA,
                            struct radeon_ring *ringB,
                            struct radeon_ring *ringC)
{
        struct radeon_fence *fenceA = NULL, *fenceB = NULL;
        struct radeon_semaphore *semaphore = NULL;
        bool sigA, sigB;
        int i, r;

        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
        if (r) {
                DRM_ERROR("Failed to emit sync fence 1\n");
                radeon_ring_unlock_undo(rdev, ringA);
                goto out_cleanup;
        }
        radeon_ring_unlock_commit(rdev, ringA);

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
        r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
        if (r) {
                DRM_ERROR("Failed to create sync fence 2\n");
                radeon_ring_unlock_undo(rdev, ringB);
                goto out_cleanup;
        }
        radeon_ring_unlock_commit(rdev, ringB);

        mdelay(1000);

        if (radeon_fence_signaled(fenceA)) {
                DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }
        if (radeon_fence_signaled(fenceB)) {
                DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %p\n", ringC);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringC);

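        /* Poll for up to three seconds; after one semaphore signal exactly one
         * of the two fences should have signaled.
         */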
        for (i = 0; i < 30; ++i) {
                mdelay(100);
                sigA = radeon_fence_signaled(fenceA);
                sigB = radeon_fence_signaled(fenceB);
                if (sigA || sigB)
                        break;
        }

        if (!sigA && !sigB) {
                DRM_ERROR("Neither fence A nor B has been signaled\n");
                goto out_cleanup;
        } else if (sigA && sigB) {
                DRM_ERROR("Both fence A and B have been signaled\n");
                goto out_cleanup;
        }

        DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

        r = radeon_ring_lock(rdev, ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %p\n", ringC);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringC);

        mdelay(1000);

        r = radeon_fence_wait(fenceA, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence A\n");
                goto out_cleanup;
        }
        r = radeon_fence_wait(fenceB, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence B\n");
                goto out_cleanup;
        }

out_cleanup:
        radeon_semaphore_free(rdev, &semaphore, NULL);

        if (fenceA)
                radeon_fence_unref(&fenceA);

        if (fenceB)
                radeon_fence_unref(&fenceB);

        if (r)
                printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

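/* Run the semaphore tests above on every ordered pair and triple of rings
 * that are ready.
 */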
void radeon_test_syncing(struct radeon_device *rdev)
{
        int i, j, k;

        for (i = 1; i < RADEON_NUM_RINGS; ++i) {
                struct radeon_ring *ringA = &rdev->ring[i];
                if (!ringA->ready)
                        continue;

                for (j = 0; j < i; ++j) {
                        struct radeon_ring *ringB = &rdev->ring[j];
                        if (!ringB->ready)
                                continue;

                        DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
                        radeon_test_ring_sync(rdev, ringA, ringB);

                        DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
                        radeon_test_ring_sync(rdev, ringB, ringA);

                        for (k = 0; k < j; ++k) {
                                struct radeon_ring *ringC = &rdev->ring[k];
                                if (!ringC->ready)
                                        continue;

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
                                radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
                                radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
                                radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
                                radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
                                radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
                                radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
                        }
                }
        }
}