/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
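
/*
 * Illustrative example: relocations added with priorities 3, 1 and 3 land
 * in bucket[3], bucket[1] and bucket[3]. Splicing the buckets back together
 * puts the two priority-3 entries first (descending priority) while keeping
 * them in their original submission order relative to each other (stable).
 */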

static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i, j;
	bool duplicate, need_mmap_lock = false;
	int r;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		unsigned priority;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (duplicate) {
			p->relocs[i].handle = 0;
			continue;
		}

		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
							  r->handle);
		if (p->relocs[i].gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs_ptr[i] = &p->relocs[i];
		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;
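		/* Example: a userspace priority of 15 on a written buffer
		 * yields 15 * 2 + 1 = 31, while RADEON_CS_MAX_PRIORITY (32)
		 * stays reserved for kernel use such as the UVD msg below.
		 */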

		/* The first reloc of a UVD job is the msg and that must be in
		   VRAM, also put everything into VRAM on AGP cards and older
		   IGP chips to avoid image corruption */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].prefered_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.shared = !r->write_domain;
		p->relocs[i].handle = r->handle;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
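
	/* Taking mmap_sem here is assumed to be what keeps the
	 * get_user_pages() done for userptr BOs during validation
	 * consistent with the process address space.
	 */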
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}

static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}

static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i, r = 0;

	for (i = 0; i < p->nrelocs; i++) {
		struct reservation_object *resv;

		if (!p->relocs[i].robj)
			continue;

		resv = p->relocs[i].robj->tbo.resv;
		r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
					       p->relocs[i].tv.shared);

		if (r)
			break;
	}
	return r;
}

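/*
 * A minimal sketch (illustrative only, never built) of the chunk layout
 * radeon_cs_parser_init() below expects from userspace; ib_len_dw, ib_data,
 * nrelocs and relocs are hypothetical placeholders.
 */
#if 0
struct drm_radeon_cs_chunk chunks[2] = {
	{ .chunk_id = RADEON_CHUNK_ID_IB,
	  .length_dw = ib_len_dw,
	  .chunk_data = (uintptr_t)ib_data },
	{ .chunk_id = RADEON_CHUNK_ID_RELOCS,
	  .length_dw = nrelocs * 4,	/* 4 dwords per reloc */
	  .chunk_data = (uintptr_t)relocs },
};
uint64_t chunk_array[2] = { (uintptr_t)&chunks[0], (uintptr_t)&chunks[1] };
struct drm_radeon_cs cs = {
	.num_chunks = 2,
	.chunks = (uintptr_t)chunk_array,
};
#endif
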
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			   sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicate whether the reservations should be backed off
 *
 * If error is set, then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	kfree(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		/* ignore duplicates */
		if (p->relocs_ptr[i] != &p->relocs[i])
			continue;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;
	}

	return radeon_vm_clear_invalids(rdev, vm);
}

static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}
	radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}

static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

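		/* Note (an assumption about SI semantics): on CHIP_TAHITI and
		 * newer the optional CONST_IB is meant for the constant
		 * engine (CE), which runs ahead of the main engine; it is
		 * fetched here and scheduled together with the main IB later.
		 */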
		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib_idx != -1)) {
			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					   ib_chunk->user_ptr,
					   ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = &parser->chunks[parser->chunk_ib_idx];

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		structure holding the parser context.
 * @pkt:	where to store packet information
 * @idx:	index in the IB where the packet starts
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining IB size or if the packet
 * is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_device *rdev = p->rdev;
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
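
/*
 * Illustrative decode (assuming the CP packet encoding used above): the
 * header dword 0xC0001000 has type 3 in bits 31:30, count 0 in bits 29:16
 * and opcode 0x10 (NOP) in bits 15:8, i.e. a PACKET3 NOP followed by one
 * data dword; this is the form used for the relocation packets handled
 * below.
 */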

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where the returned relocation entry is stored
 * @nomm:	set for the legacy (no memory manager) UMS path
 *
 * Check that the next packet is a relocation packet3 and return the
 * relocation entry for the buffer it references.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_cs_reloc **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
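	/* For the KMS path each entry is laid out like struct
	 * drm_radeon_cs_reloc (handle, read_domains, write_domain, flags);
	 * the legacy UMS path below instead reads the 64-bit GPU offset
	 * out of dwords 0 and 3 of the entry.
	 */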
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}