drm/radeon: use one VMID for each ring
drivers/gpu/drm/radeon/radeon_cs.c

/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}

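/*
 * Illustrative sketch of how the bucket API above fits together (this
 * mirrors its use in radeon_cs_parser_relocs() below; "reloc" and
 * "priority" are placeholders for the caller's own variables):
 *
 *	struct radeon_cs_buckets buckets;
 *
 *	radeon_cs_buckets_init(&buckets);
 *	radeon_cs_buckets_add(&buckets, &reloc->tv.head, priority);
 *	radeon_cs_buckets_get_list(&buckets, &p->validated);
 *
 * Items end up on p->validated sorted by descending priority, with the
 * original order preserved among items of equal priority.
 */

/**
 * radeon_cs_parser_relocs() - build and validate the reloc/BO list
 * @p: parser structure holding parsing context.
 *
 * Looks up every GEM object referenced by the relocation chunk, assigns
 * the preferred and allowed domains, sorts the buffers by priority into
 * p->validated and finally validates them. Returns 0 on success or a
 * negative error code on failure.
 */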
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i, j;
	bool duplicate, need_mmap_lock = false;
	int r;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		unsigned priority;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (duplicate) {
			p->relocs[i].handle = 0;
			continue;
		}

		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
							  r->handle);
		if (p->relocs[i].gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs_ptr[i] = &p->relocs[i];
		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;

		/* The first reloc of an UVD job is the msg and that must be in
		   VRAM; also put everything into VRAM on AGP cards and older
		   IGP chips to avoid image corruptions */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].prefered_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.shared = !r->write_domain;
		p->relocs[i].handle = r->handle;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}

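/**
 * radeon_cs_get_ring() - map a userspace ring id to a hardware ring index
 * @p: parser structure holding parsing context.
 * @ring: ring identifier (RADEON_CS_RING_GFX, _COMPUTE, _DMA, _UVD, _VCE).
 * @priority: requested submission priority.
 *
 * Stores the priority and translates the userspace ring id into a
 * hardware ring index, choosing between the compute and DMA instances
 * based on chip family and priority. Returns 0 on success, -EINVAL for
 * an unknown ring id or a ring the chip doesn't have.
 */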
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}

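/**
 * radeon_cs_sync_rings() - sync with the fences of all relocated BOs
 * @p: parser structure holding parsing context.
 *
 * Walks the relocation list and adds the reservation-object fences of
 * every buffer to the IB's sync object, so the submission waits for any
 * work still using those buffers on other rings.
 */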
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i, r = 0;

	for (i = 0; i < p->nrelocs; i++) {
		struct reservation_object *resv;

		if (!p->relocs[i].robj)
			continue;

		resv = p->relocs[i].robj->tbo.resv;
		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
				     p->relocs[i].tv.shared);

		if (r)
			break;
	}
	return r;
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
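/**
 * radeon_cs_parser_init() - initialize the parser from userspace data
 * @p: parser structure holding parsing context.
 * @data: the drm_radeon_cs ioctl argument from userspace.
 *
 * Copies in the chunk array, classifies the IB, reloc, flags and const
 * IB chunks, and (on KMS) resolves the target ring and validates the VM
 * flags. Returns 0 on success or a negative error code on failure.
 */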
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->const_ib.sa_bo = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			   sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}

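/**
 * cmp_size_smaller_first() - list_sort() comparator, smallest BO first
 * @priv: unused private data pointer from list_sort().
 * @a: list head embedded in the first radeon_cs_reloc.
 * @b: list head embedded in the second radeon_cs_reloc.
 *
 * Returns a negative value when A's backing object has fewer pages than
 * B's, so list_sort() orders the validated list from the smallest
 * buffer to the largest.
 */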
static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator to backoff the reservation
 *
 * If error is set, then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	drm_free_large(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

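/**
 * radeon_cs_ib_chunk() - parse and submit a CS without a VM
 * @rdev: radeon device the CS was submitted to.
 * @parser: parser structure holding parsing context.
 *
 * Runs the per-ring command-stream checker on the IB, synchronizes with
 * the fences of all referenced buffers and schedules the IB. Only used
 * when the CS does not target a VM.
 */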
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

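/**
 * radeon_bo_vm_update_pte() - bring the VM page tables up to date
 * @p: parser structure holding parsing context.
 * @vm: the VM the command submission executes in.
 *
 * Updates the page directory, clears freed mappings, refreshes the
 * mapping of the temporary ring BO and then updates the page-table
 * entries of every (non-duplicate) relocated buffer.
 */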
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		/* ignore duplicates */
		if (p->relocs_ptr[i] != &p->relocs[i])
			continue;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;
	}

	return radeon_vm_clear_invalids(rdev, vm);
}

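/**
 * radeon_cs_ib_vm_chunk() - parse and submit a CS executing in a VM
 * @rdev: radeon device the CS was submitted to.
 * @parser: parser structure holding parsing context.
 *
 * Runs the per-ring IB checker on the (const) IB, updates the page
 * tables under the VM mutex, synchronizes the rings and schedules the
 * IB(s), chaining the const IB on SI and newer chips.
 */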
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}
	radeon_sync_fence(&parser->ib.sync, vm->fence);

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}

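/**
 * radeon_cs_handle_lockup() - translate a GPU lockup into a retry
 * @rdev: radeon device the CS was submitted to.
 * @r: error code returned so far.
 *
 * On -EDEADLK the GPU is considered locked up: reset it and, if the
 * reset succeeds, return -EAGAIN so userspace resubmits the CS.
 */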
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

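/**
 * radeon_cs_ib_fill() - allocate IBs and copy in the command streams
 * @rdev: radeon device the CS was submitted to.
 * @parser: parser structure holding parsing context.
 *
 * Allocates the IB (and the const IB on SI and newer chips when one was
 * supplied), enforces RADEON_IB_VM_MAX_SIZE for VM submissions and
 * copies the command words from the chunk kdata or from userspace.
 */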
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib_idx != -1)) {
			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					   ib_chunk->user_ptr,
					   ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = &parser->chunks[parser->chunk_ib_idx];

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}

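/**
 * radeon_cs_ioctl() - the main command submission entry point
 * @dev: DRM device.
 * @data: the drm_radeon_cs ioctl argument from userspace.
 * @filp: DRM file private of the submitting client.
 *
 * Initializes the parser, fills the IBs, validates the relocations and
 * then hands the stream to the non-VM or VM submission path. GPU
 * lockups detected along the way are turned into a reset plus -EAGAIN.
 */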
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context.
 * @pkt: where to store packet information
 * @idx: ib index to start parsing at
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * type is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_device *rdev = p->rdev;
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

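/*
 * Typical caller pattern (illustrative sketch; the per-ASIC CS checkers
 * follow this shape when walking an IB chunk packet by packet):
 *
 *	do {
 *		r = radeon_cs_packet_parse(p, &pkt, p->idx);
 *		if (r)
 *			return r;
 *		p->idx += pkt.count + 2;
 *		switch (pkt.type) { ... }
 *	} while (p->idx < ib_chunk->length_dw);
 */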
/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p: structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p: structure holding the parser context.
 * @pkt: structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the resolved reloc information
 * @nomm: legacy path without memory management; compute the GPU offset
 *	directly from the reloc chunk data
 *
 * Check if the next packet is a relocation packet3, do bo validation and
 * compute GPU offset using the provided start.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_cs_reloc **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}