drm/radeon: make all functions work with multiple rings.
drivers/gpu/drm/radeon/radeon_ring.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

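/* Read one dword from the IB being parsed. Chunk data is mapped a
 * page at a time; the two most recently used pages are cached in
 * kpage[0]/kpage[1], and misses are mapped on demand through
 * radeon_cs_update_pages(). */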
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

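/* Append one dword at the current write pointer and advance it;
 * ptr_mask makes the pointer wrap (the ring size in dwords is a
 * power of two). */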
void radeon_ring_write(struct radeon_cp *cp, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (cp->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to ring than expected!\n");
	}
#endif
	cp->ring[cp->wptr++] = v;
	cp->wptr &= cp->ptr_mask;
	cp->count_dw--;
	cp->ring_free_dw--;
}

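/* "Bogus" IB handling: radeon_ib_bogus_add() keeps a copy of a
 * problematic IB on a list so it can be dumped later through the
 * radeon_ib_bogus debugfs file; radeon_ib_bogus_cleanup() frees the
 * recorded copies. */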
void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
	struct radeon_ib *ib, *n;

	list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
		list_del(&ib->list);
		vfree(ib->ptr);
		kfree(ib);
	}
}

void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ib *bib;

	bib = kmalloc(sizeof(*bib), GFP_KERNEL);
	if (bib == NULL)
		return;
	bib->ptr = vmalloc(ib->length_dw * 4);
	if (bib->ptr == NULL) {
		kfree(bib);
		return;
	}
	memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
	bib->length_dw = ib->length_dw;
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
	mutex_unlock(&rdev->ib_pool.mutex);
}

/*
 * IB.
 */
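/* Grab a free slot from the IB pool. Typical IB life cycle
 * (a sketch, error handling omitted):
 *	r = radeon_ib_get(rdev, ring, &ib);
 *	... fill ib->ptr[] and set ib->length_dw ...
 *	r = radeon_ib_schedule(rdev, ib);
 *	radeon_ib_free(rdev, &ib);
 */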
int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	int r = 0, i, c;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= (RADEON_IB_POOL_SIZE - 1);
		if (rdev->ib_pool.ibs[i].free) {
			nib = &rdev->ib_pool.ibs[i];
			break;
		}
	}
	if (nib == NULL) {
		/* This should never happen: it means all IBs are allocated
		 * and none has been scheduled yet. Return EBUSY to userspace,
		 * hoping the next ioctl retry has better luck.
		 */
		dev_err(rdev->dev, "no free indirect buffer!\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -EBUSY;
	}
	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	nib->free = false;
	if (nib->fence) {
		mutex_unlock(&rdev->ib_pool.mutex);
		r = radeon_fence_wait(nib->fence, false);
		if (r) {
			dev_err(rdev->dev, "error waiting on fence of IB(%u:0x%016lX:%u)\n",
				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
			mutex_lock(&rdev->ib_pool.mutex);
			nib->free = true;
			mutex_unlock(&rdev->ib_pool.mutex);
			radeon_fence_unref(&fence);
			return r;
		}
		mutex_lock(&rdev->ib_pool.mutex);
	}
	radeon_fence_unref(&nib->fence);
	nib->fence = fence;
	nib->length_dw = 0;
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
	return 0;
}

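/* Return an IB to the pool. If its fence was never emitted the
 * fence is dropped right away; otherwise the fence keeps protecting
 * the slot until the GPU is done with it. */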
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	if (!tmp->fence->emitted)
		radeon_fence_unref(&tmp->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	tmp->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
}

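/* Emit an IB on the GFX ring together with its fence. Once
 * scheduled, the pool slot is marked free again; the emitted fence
 * is what prevents premature reuse. */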
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_cp *cp = &rdev->cp;
	int r = 0;

	if (!ib->length_dw || !cp->ready) {
		/* TODO: Nothing in the ib we should report. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, cp, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	/* once scheduled IB is considered free and protected by the fence */
	ib->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev, cp);
	return 0;
}

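/* Allocate the IB pool: one pinned GTT buffer object carved into
 * RADEON_IB_POOL_SIZE slots of 64KB each. */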
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
	/* Allocate 1M object buffer */
	r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
			     PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
			     &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].free = true;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	return r;
}

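/* Tear the IB pool down: the buffer object pointer is detached
 * under the pool mutex, then unmapped, unpinned and unreferenced
 * outside of it. */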
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *robj;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	radeon_ib_bogus_cleanup(rdev);
	robj = rdev->ib_pool.robj;
	rdev->ib_pool.robj = NULL;
	mutex_unlock(&rdev->ib_pool.mutex);

	if (robj) {
		r = radeon_bo_reserve(robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(robj);
			radeon_bo_unpin(robj);
			radeon_bo_unreserve(robj);
		}
		radeon_bo_unref(&robj);
	}
}

/*
 * Ring.
 */
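/* Recompute how many dwords are free between the read and write
 * pointers; rptr comes from the writeback page when enabled, from
 * the chip register otherwise. */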
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp)
{
	if (rdev->wb.enabled)
		rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
	else {
		if (rdev->family >= CHIP_R600)
			rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
		else
			rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	}
	/* This works because ring_size is a power of 2 */
	cp->ring_free_dw = (cp->rptr + (cp->ring_size / 4));
	cp->ring_free_dw -= cp->wptr;
	cp->ring_free_dw &= cp->ptr_mask;
	if (!cp->ring_free_dw) {
		cp->ring_free_dw = cp->ring_size / 4;
	}
}

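/* Reserve ndw dwords on the ring, rounded up to the fetch
 * alignment; when the ring is full, wait for the next fence to
 * retire some work. */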
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + cp->align_mask) & ~cp->align_mask;
	while (ndw > (cp->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, cp);
		if (ndw < cp->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev, RADEON_RING_TYPE_GFX_INDEX);
		if (r)
			return r;
	}
	cp->count_dw = ndw;
	cp->wptr_old = cp->wptr;
	return 0;
}

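/* Same as radeon_ring_alloc() but takes the ring mutex. Typical
 * usage (a sketch, error handling omitted):
 *	r = radeon_ring_lock(rdev, cp, ndw);
 *	... emit packets with radeon_ring_write() ...
 *	radeon_ring_unlock_commit(rdev, cp);
 */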
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
{
	int r;

	mutex_lock(&cp->mutex);
	r = radeon_ring_alloc(rdev, cp, ndw);
	if (r) {
		mutex_unlock(&cp->mutex);
		return r;
	}
	return 0;
}

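/* Pad the ring with type-2 packets up to the fetch alignment, then
 * make the new write pointer visible to the hardware. */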
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_cp *cp)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (cp->align_mask + 1) -
		       (cp->wptr & cp->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(cp, 2 << 30);
	}
	DRM_MEMORYBARRIER();
	radeon_cp_commit(rdev, cp);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp)
{
	radeon_ring_commit(rdev, cp);
	mutex_unlock(&cp->mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_cp *cp)
{
	cp->wptr = cp->wptr_old;
	mutex_unlock(&cp->mutex);
}

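/* Create, pin and map the ring buffer object on first use;
 * ring_size is expected to be a power of two since ptr_mask is
 * derived from it. */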
int radeon_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size)
{
	int r;

	cp->ring_size = ring_size;
	/* Allocate ring buffer */
	if (cp->ring_obj == NULL) {
		r = radeon_bo_create(rdev, cp->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &cp->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(cp->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(cp->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &cp->gpu_addr);
		if (r) {
			radeon_bo_unreserve(cp->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(cp->ring_obj,
				   (void **)&cp->ring);
		radeon_bo_unreserve(cp->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	cp->ptr_mask = (cp->ring_size / 4) - 1;
	cp->ring_free_dw = cp->ring_size / 4;
	return 0;
}

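/* Tear the ring down: detach the buffer object under the ring
 * mutex, then unmap, unpin and drop the reference. */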
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_cp *cp)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&cp->mutex);
	ring_obj = cp->ring_obj;
	cp->ring = NULL;
	cp->ring_obj = NULL;
	mutex_unlock(&cp->mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
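/* Dump the contents of one IB pool slot. */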
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

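/* Dump and consume the oldest recorded bogus IB; each read of the
 * debugfs file pops one entry off the list. */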
static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_device *rdev = node->info_ent->data;
	struct radeon_ib *ib;
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (list_empty(&rdev->ib_pool.bogus_ib)) {
		mutex_unlock(&rdev->ib_pool.mutex);
		seq_printf(m, "no bogus IB recorded\n");
		return 0;
	}
	ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
	list_del_init(&ib->list);
	mutex_unlock(&rdev->ib_pool.mutex);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	vfree(ib->ptr);
	kfree(ib);
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
	{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

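/* Register one debugfs file per IB pool slot plus the bogus IB
 * dumper; a no-op when CONFIG_DEBUG_FS is disabled. */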
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	int r;

	radeon_debugfs_ib_bogus_info_list[0].data = rdev;
	r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
	if (r)
		return r;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}