/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
int radeon_debugfs_sa_init(struct radeon_device *rdev);
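
/*
 * An indirect buffer (IB) is a chunk of command stream carved out of the
 * shared suballocator pool (rdev->ring_tmp_bo) instead of its own buffer
 * object. radeon_ib_get() below grabs a 256-byte aligned chunk and a fence
 * so the chunk can be reused once the GPU has consumed it.
 */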
int radeon_ib_get(struct radeon_device *rdev, int ring,
                  struct radeon_ib *ib, unsigned size)
{
        int r;

        r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
        if (r) {
                dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
                return r;
        }
        r = radeon_fence_create(rdev, &ib->fence, ring);
        if (r) {
                dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
                radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
                return r;
        }

        ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
        ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
        ib->semaphore = NULL;
        ib->is_const_ib = false;
        return 0;
}
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
        radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
        radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
        radeon_fence_unref(&ib->fence);
}
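
/*
 * Scheduling an IB asks the CP to fetch and execute it: reserve space on
 * the target ring (64 dwords covers the IB dispatch plus the trailing
 * fence), emit both, then commit the write pointer.
 */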
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
        int r;

        if (!ib->length_dw || !ring->ready) {
                /* TODO: nothing in the IB; we should report this. */
                dev_err(rdev->dev, "couldn't schedule ib\n");
                return -EINVAL;
        }

        /* 64 dwords should be enough for fence too */
        r = radeon_ring_lock(rdev, ring, 64);
        if (r) {
                dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }
        radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
        radeon_fence_emit(rdev, ib->fence);
        radeon_ring_unlock_commit(rdev, ring);
        return 0;
}
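
/*
 * The whole IB pool is one GTT buffer object handed to the suballocator:
 * RADEON_IB_POOL_SIZE entries of 64KiB each.
 */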
int radeon_ib_pool_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->ib_pool_ready) {
                return 0;
        }
        r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
                                      RADEON_IB_POOL_SIZE*64*1024,
                                      RADEON_GEM_DOMAIN_GTT);
        if (r) {
                return r;
        }
        rdev->ib_pool_ready = true;
        if (radeon_debugfs_sa_init(rdev)) {
                dev_err(rdev->dev, "failed to register debugfs file for SA\n");
        }
        return 0;
}
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
        if (rdev->ib_pool_ready) {
                radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
                rdev->ib_pool_ready = false;
        }
}
int radeon_ib_pool_start(struct radeon_device *rdev)
{
        return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
}
int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
        return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
}
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
        unsigned i;
        int r;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                struct radeon_ring *ring = &rdev->ring[i];

                if (!ring->ready)
                        continue;

                r = radeon_ib_test(rdev, i, ring);
                if (r) {
                        ring->ready = false;

                        if (i == RADEON_RING_TYPE_GFX_INDEX) {
                                /* oh, oh, that's really bad */
                                DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
                                rdev->accel_working = false;
                                return r;

                        } else {
                                /* still not good, but we can live with it */
                                DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
                        }
                }
        }
        return 0;
}
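
/*
 * Rings
 */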
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
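
/*
 * Writes land at the driver-side write pointer and wrap through ptr_mask;
 * callers must have reserved enough dwords via radeon_ring_lock()/_alloc()
 * first, which is what the count_dw underflow check below catches.
 */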
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
        if (ring->count_dw <= 0) {
                DRM_ERROR("radeon: writing more dwords to ring than expected!\n");
        }
#endif
        ring->ring[ring->wptr++] = v;
        ring->wptr &= ring->ptr_mask;
        ring->count_dw--;
        ring->ring_free_dw--;
}
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
        /* r1xx-r5xx only has CP ring */
        if (rdev->family < CHIP_R600)
                return RADEON_RING_TYPE_GFX_INDEX;

        if (rdev->family >= CHIP_CAYMAN) {
                if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
                        return CAYMAN_RING_TYPE_CP1_INDEX;
                else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
                        return CAYMAN_RING_TYPE_CP2_INDEX;
        }
        return RADEON_RING_TYPE_GFX_INDEX;
}
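
/*
 * Free space is (rptr - wptr) modulo the ring size, computed with a mask
 * because ring_size is a power of two. Worked example for a 16KiB ring
 * (4096 dwords, ptr_mask 4095): rptr = 100, wptr = 4000 gives
 * (100 + 4096 - 4000) & 4095 = 196 free dwords. A result of 0 means the
 * ring is empty, not full, hence the fixup at the end of the function.
 */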
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 rptr;

        if (rdev->wb.enabled)
                rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
        else
                rptr = RREG32(ring->rptr_reg);
        ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
        /* This works because ring_size is a power of 2 */
        ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
        ring->ring_free_dw -= ring->wptr;
        ring->ring_free_dw &= ring->ptr_mask;
        if (!ring->ring_free_dw) {
                ring->ring_free_dw = ring->ring_size / 4;
        }
}
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        /* Align requested size with padding so unlock_commit can
         * pad safely */
        ndw = (ndw + ring->align_mask) & ~ring->align_mask;
        while (ndw > (ring->ring_free_dw - 1)) {
                radeon_ring_free_size(rdev, ring);
                if (ndw < ring->ring_free_dw) {
                        break;
                }
                r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
                if (r)
                        return r;
        }
        ring->count_dw = ndw;
        ring->wptr_old = ring->wptr;
        return 0;
}
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
        int r;

        mutex_lock(&rdev->ring_lock);
        r = radeon_ring_alloc(rdev, ring, ndw);
        if (r) {
                mutex_unlock(&rdev->ring_lock);
                return r;
        }
        return 0;
}
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
        unsigned count_dw_pad;
        unsigned i;

        /* We pad to match fetch size */
        count_dw_pad = (ring->align_mask + 1) -
                       (ring->wptr & ring->align_mask);
        for (i = 0; i < count_dw_pad; i++) {
                radeon_ring_write(ring, ring->nop);
        }
        DRM_MEMORYBARRIER();
        WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
        (void)RREG32(ring->wptr_reg);
}
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
        radeon_ring_commit(rdev, ring);
        mutex_unlock(&rdev->ring_lock);
}
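
/*
 * The usual submission pattern pairs these helpers (sketch only, with a
 * hypothetical dword count and payload):
 *
 *      r = radeon_ring_lock(rdev, ring, 64);
 *      if (r)
 *              return r;
 *      radeon_ring_write(ring, ...);
 *      radeon_ring_unlock_commit(rdev, ring);
 *
 * If something fails after locking, radeon_ring_unlock_undo() below rolls
 * the write pointer back instead of committing a partial packet.
 */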
void radeon_ring_undo(struct radeon_ring *ring)
{
        ring->wptr = ring->wptr_old;
}
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
        radeon_ring_undo(ring);
        mutex_unlock(&rdev->ring_lock);
}
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
        int r;

        radeon_ring_free_size(rdev, ring);
        if (ring->rptr == ring->wptr) {
                r = radeon_ring_alloc(rdev, ring, 1);
                if (!r) {
                        radeon_ring_write(ring, ring->nop);
                        radeon_ring_commit(rdev, ring);
                }
        }
}
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
        ring->last_rptr = ring->rptr;
        ring->last_activity = jiffies;
}
/**
 * radeon_ring_test_lockup() - check if a ring is locked up by recording information
 * @rdev:       radeon device structure
 * @ring:       radeon_ring structure holding ring information
 *
 * We don't need to initialize the lockup tracking information: either the CP
 * rptr will have moved to a different value, or jiffies will wrap around,
 * and both force (re)initialization of the tracking information.
 *
 * A possible false positive is being called after a while with last_cp_rptr ==
 * the current CP rptr; it's unlikely, but it might happen. To avoid this, if
 * the elapsed time since the last call is bigger than 2 seconds we return
 * false and update the tracking information. Because of this, the caller must
 * call radeon_ring_test_lockup several times in less than 2 seconds for a
 * lockup to be reported; the fencing code should be cautious about that.
 *
 * The caller should write to the ring to force the CP to do something, so we
 * don't get a false positive when the CP simply has nothing to do.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        unsigned long cjiffies, elapsed;
        uint32_t rptr;

        cjiffies = jiffies;
        if (!time_after(cjiffies, ring->last_activity)) {
                /* likely a wrap around */
                radeon_ring_lockup_update(ring);
                return false;
        }
        rptr = RREG32(ring->rptr_reg);
        ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
        if (ring->rptr != ring->last_rptr) {
                /* CP is still working, no lockup */
                radeon_ring_lockup_update(ring);
                return false;
        }
        elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
        if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
                dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
                return true;
        }
        /* give a chance to the GPU ... */
        return false;
}
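
/*
 * Ring setup: callers pass the hardware rptr/wptr register offsets, the
 * shift/mask used to decode them, the writeback slot holding the rptr
 * copy, and the nop opcode used for padding. The ring itself is a pinned,
 * kernel-mapped buffer object in GTT.
 */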
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
                     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
                     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
        int r;

        ring->ring_size = ring_size;
        ring->rptr_offs = rptr_offs;
        ring->rptr_reg = rptr_reg;
        ring->wptr_reg = wptr_reg;
        ring->ptr_reg_shift = ptr_reg_shift;
        ring->ptr_reg_mask = ptr_reg_mask;
        ring->nop = nop;
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT,
                                     NULL, &ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                r = radeon_bo_reserve(ring->ring_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
                                  &ring->gpu_addr);
                if (r) {
                        radeon_bo_unreserve(ring->ring_obj);
                        dev_err(rdev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
                r = radeon_bo_kmap(ring->ring_obj,
                                   (void **)&ring->ring);
                radeon_bo_unreserve(ring->ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
        ring->ptr_mask = (ring->ring_size / 4) - 1;
        ring->ring_free_dw = ring->ring_size / 4;
        if (radeon_debugfs_ring_init(rdev, ring)) {
                DRM_ERROR("Failed to register debugfs file for rings!\n");
        }
        return 0;
}
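
/*
 * Teardown mirrors init: detach the buffer object under the ring lock so
 * concurrent users observe ring_obj as NULL, then unmap, unpin and drop
 * the last reference outside the lock.
 */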
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
        int r;
        struct radeon_bo *ring_obj;

        mutex_lock(&rdev->ring_lock);
        ring_obj = ring->ring_obj;
        ring->ready = false;
        ring->ring = NULL;
        ring->ring_obj = NULL;
        mutex_unlock(&rdev->ring_lock);

        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(ring_obj);
                        radeon_bo_unpin(ring_obj);
                        radeon_bo_unreserve(ring_obj);
                }
                radeon_bo_unref(&ring_obj);
        }
}
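
/*
 * Debugfs
 */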
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int ridx = *(int*)node->info_ent->data;
        struct radeon_ring *ring = &rdev->ring[ridx];
        unsigned count, i, j;

        radeon_ring_free_size(rdev, ring);
        count = (ring->ring_size / 4) - ring->ring_free_dw;
        seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
        seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
        seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
        seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
        i = ring->rptr;
        for (j = 0; j <= count; j++) {
                seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
                i = (i + 1) & ring->ptr_mask;
        }
        return 0;
}
static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
        {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
        {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
        {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;

        radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

        return 0;
}
static struct drm_info_list radeon_debugfs_sa_list[] = {
        {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
                struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
                int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
                int r;

                /* only register the file matching this ring */
                if (&rdev->ring[ridx] != ring)
                        continue;

                r = radeon_debugfs_add_files(rdev, info, 1);
                if (r)
                        return r;
        }
#endif
        return 0;
}
int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
        return 0;
#endif
}