/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev);

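/* Read one dword (at dword index idx) from the CS indirect buffer chunk.
 * The chunk data is mapped two kernel pages at a time; on a miss the
 * needed page is pulled in via radeon_cs_update_pages().  On error the
 * parser_error field is set and 0 is returned. */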
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

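/* Write one dword at the current write pointer and advance it, wrapping
 * with ptr_mask.  Callers must have reserved enough space beforehand via
 * radeon_ring_lock()/radeon_ring_alloc(). */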
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

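/* Free every IB copy that was stashed on the bogus_ib list for later
 * inspection through debugfs. */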
void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
	struct radeon_ib *ib, *n;

	list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
		list_del(&ib->list);
		vfree(ib->ptr);
		kfree(ib);
	}
}

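/* Stash a copy of an IB on the bogus_ib list so it can be dumped later
 * through the radeon_ib_bogus debugfs file.  Allocation failures are
 * silently ignored since this is only a debugging aid. */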
void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ib *bib;

	bib = kmalloc(sizeof(*bib), GFP_KERNEL);
	if (bib == NULL)
		return;
	bib->ptr = vmalloc(ib->length_dw * 4);
	if (bib->ptr == NULL) {
		kfree(bib);
		return;
	}
	memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
	bib->length_dw = ib->length_dw;
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
	mutex_unlock(&rdev->ib_pool.mutex);
}

/*
 * IB.
 */
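/* Pick a free IB from the pool, starting the search at head_id.  A new
 * fence is created for it up front; if the chosen IB still has an old
 * fence pending we wait for it (dropping the pool mutex while waiting)
 * before handing the IB back to the caller. */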
int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	int r = 0, i, c;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= (RADEON_IB_POOL_SIZE - 1);
		if (rdev->ib_pool.ibs[i].free) {
			nib = &rdev->ib_pool.ibs[i];
			break;
		}
	}
	if (nib == NULL) {
		/* This should never happen: it means we allocated all
		 * IBs and haven't scheduled one yet.  Return EBUSY to
		 * userspace, hoping that on a later ioctl we get better
		 * luck.
		 */
		dev_err(rdev->dev, "no free indirect buffer!\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -EBUSY;
	}
	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	nib->free = false;
	if (nib->fence) {
		mutex_unlock(&rdev->ib_pool.mutex);
		r = radeon_fence_wait(nib->fence, false);
		if (r) {
			dev_err(rdev->dev, "error waiting on fence of IB(%u:0x%016lX:%u)\n",
				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
			mutex_lock(&rdev->ib_pool.mutex);
			nib->free = true;
			mutex_unlock(&rdev->ib_pool.mutex);
			radeon_fence_unref(&fence);
			return r;
		}
		mutex_lock(&rdev->ib_pool.mutex);
	}
	radeon_fence_unref(&nib->fence);
	nib->fence = fence;
	nib->length_dw = 0;
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
	return 0;
}

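/* Return an IB to the pool.  If its fence was never emitted, the fence
 * is dropped here; otherwise the fence keeps protecting the IB contents
 * until the GPU is done with it. */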
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	if (!tmp->fence->emitted)
		radeon_fence_unref(&tmp->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	tmp->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
}

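/* Submit an IB to the ring it was created for: reserve ring space
 * (64 dwords covers the IB execute packet plus the fence), emit the IB
 * execute and the fence, then mark the IB free again -- from now on the
 * fence alone protects its contents. */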
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing in the ib to report. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	/* once scheduled, the IB is considered free and protected by the fence */
	ib->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

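/* One-time setup of the IB pool: allocate a single 1MB buffer object in
 * GTT, pin and map it, and carve it into RADEON_IB_POOL_SIZE slots of
 * 64KB each.  Also registers the IB and ring debugfs files. */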
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
	/* Allocate 1M object buffer */
	r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
			     PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
			     &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].free = true;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	if (radeon_debugfs_ring_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	return r;
}

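/* Tear down the IB pool: drop any recorded bogus IBs, then unmap, unpin
 * and free the backing buffer object. */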
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *robj;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	radeon_ib_bogus_cleanup(rdev);
	robj = rdev->ib_pool.robj;
	rdev->ib_pool.robj = NULL;
	mutex_unlock(&rdev->ib_pool.mutex);

	if (robj) {
		r = radeon_bo_reserve(robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(robj);
			radeon_bo_unpin(robj);
			radeon_bo_unreserve(robj);
		}
		radeon_bo_unref(&robj);
	}
}


/*
 * Ring.
 */
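/* Map a ring structure back to its index in rdev->ring[].  Pre-R600
 * hardware only has the GFX CP ring; Cayman and later add two more CP
 * rings. */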
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

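/* Refresh the cached read pointer (from the writeback buffer when
 * enabled, otherwise from the rptr register) and recompute how many
 * dwords are free between wptr and rptr. */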
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	if (rdev->wb.enabled)
		ring->rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		ring->rptr = RREG32(ring->rptr_reg);
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

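/* Reserve ndw dwords of ring space (rounded up to the ring's alignment).
 * If the ring is too full, wait for the next fence on this ring to
 * retire work until enough space is available. */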
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

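/* Like radeon_ring_alloc(), but also takes the ring mutex; paired with
 * radeon_ring_unlock_commit() or radeon_ring_unlock_undo(). */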
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&ring->mutex);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&ring->mutex);
		return r;
	}
	return 0;
}

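/* Pad the ring up to the fetch-size alignment with type-2 filler
 * packets (2 << 30), then publish the new write pointer to the
 * hardware; the register read-back is there to flush the posted write. */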
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, 2 << 30);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, ring->wptr);
	(void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&ring->mutex);
}

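/* Abandon the dwords written since radeon_ring_lock(): restore the saved
 * write pointer and release the ring mutex. */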
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
	mutex_unlock(&ring->mutex);
}

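/* Record the ring size and pointer registers, then allocate, pin and map
 * the ring buffer object in GTT (only on the first call; the object is
 * reused afterwards). */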
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	return 0;
}

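/* Detach the ring buffer object from the ring under the mutex, then
 * unmap, unpin and drop the reference outside of it. */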
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&ring->mutex);
	ring_obj = ring->ring_obj;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&ring->mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_device *rdev = node->info_ent->data;
	struct radeon_ib *ib;
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (list_empty(&rdev->ib_pool.bogus_ib)) {
		mutex_unlock(&rdev->ib_pool.mutex);
		seq_printf(m, "no bogus IB recorded\n");
		return 0;
	}
	ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
	list_del_init(&ib->list);
	mutex_unlock(&rdev->ib_pool.mutex);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	vfree(ib->ptr);
	kfree(ib);
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
	{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
					ARRAY_SIZE(radeon_debugfs_ring_info_list));
#else
	return 0;
#endif
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	int r;

	radeon_debugfs_ib_bogus_info_list[0].data = rdev;
	r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
	if (r)
		return r;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}