drm/ttm: flip the switch, and convert to dma_fence
drivers/gpu/drm/nouveau/nouveau_gem.c

/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

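/* GEM object destructor: tears down any prime import, releases the GEM
 * object, and drops the TTM buffer reference that backs it. */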
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);
}

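/* Called for each open DRM file that gains a handle to this object:
 * finds or creates the per-client VMA mapping for the buffer. */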
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

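/* Deferred VMA teardown: if the buffer is mapped and carries an
 * exclusive fence, the unmap is queued to run from fence completion via
 * nouveau_fence_work(); otherwise it happens immediately. */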
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct fence *fence = NULL;

	list_del(&vma->head);

	if (mapped)
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
}

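/* Called when an open DRM file drops its handle: releases this client's
 * VMA reference, unmapping it once the refcount hits zero. */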
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}

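/* Allocate a new buffer object with an embedded GEM object, translating
 * the requested GEM domains into TTM placement flags. */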
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time. not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}

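/* Fill in the userspace-visible info for a buffer: domain, offset (the
 * per-client VMA offset when a VM is in use), size, mmap handle, and
 * tiling state. */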
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

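/* DRM_NOUVEAU_GEM_NEW: validate the requested tile flags, allocate the
 * buffer, and return a handle to it; the allocation reference is dropped
 * once the handle owns the object. */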
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}

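/* Work out the TTM placement for a pushbuf buffer: prefer whichever of
 * the requested domains the buffer already resides in, to avoid needless
 * migration, with VRAM winning ties. */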
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

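/* Per-submission bookkeeping: buffers sorted by allowed placement, plus
 * the ww_mutex acquire context used to reserve them deadlock-free. */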
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
	struct ww_acquire_ctx ticket;
};

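/* Unwind one validation list: attach the new fence (if any) to each
 * buffer, drop kmaps, and unreserve/unreference everything. */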
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
		   struct ww_acquire_ctx *ticket)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence, &op->ticket);
	validate_fini_list(&op->gart_list, fence, &op->ticket);
	validate_fini_list(&op->both_list, fence, &op->ticket);
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_no_ticket(op, fence);
	ww_acquire_fini(&op->ticket);
}

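/* Look up and reserve every buffer on the pushbuf's validation list,
 * sorting each onto the appropriate placement list. Reservation uses a
 * ww_acquire ticket; on -EDEADLK everything is backed off and the
 * contended buffer is re-acquired via the slowpath before retrying. */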
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(error, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			validate_fini_no_ticket(op, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				ww_acquire_done(&op->ticket);
				ww_acquire_fini(&op->ticket);
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_PRINTK(error, cli, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	return 0;
}

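/* Validate each reserved buffer into an allowed placement, synchronise
 * it with the channel's fences, and on pre-Tesla chips write back any
 * presumed offset/domain that changed so userspace relocations stay
 * accurate. Returns the number of buffers needing relocation, or a
 * negative error code. */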
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(error, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					 &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

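/* Reserve and validate the full buffer list for a pushbuf submission,
 * accumulating the reloc count across all three placement lists. */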
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

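/* Helpers for copying variable-length ioctl arrays in from userspace:
 * u_memcpya() tries kmalloc and falls back to vmalloc for large arrays,
 * and u_free() releases memory from either allocator. */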
static inline void
u_free(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

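/* Patch presumed addresses into pushbuf contents: for each reloc whose
 * buffer moved, compute the low/high half of the new offset (optionally
 * OR'ing in domain-specific bits), wait for the target buffer to idle,
 * and write the value through a kernel mapping. */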
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(error, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(error, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		if (ret) {
			NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

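/* DRM_NOUVEAU_GEM_PUSHBUF: the main submission path. Copies in the
 * push/buffer arrays, validates the buffer list, applies relocs if
 * needed, emits the pushes in the format the channel supports (IB ring,
 * >=nv25 call, or older jump), then fences every buffer involved. */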
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(error, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

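/* Translate GEM domain flags to their TTM placement equivalents. */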
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

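/* DRM_NOUVEAU_GEM_CPU_PREP: wait for a buffer to become idle before CPU
 * access. With NOWAIT set this is just a busy check; otherwise the
 * exclusive fence is taken and waited on outside the reservation. */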
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret;
	struct nouveau_fence *fence = NULL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	ret = ttm_bo_reserve(&nvbo->bo, true, false, false, NULL);
	if (!ret) {
		ret = ttm_bo_wait(&nvbo->bo, true, true, true);
		if (!no_wait && ret) {
			struct fence *excl;

			excl = reservation_object_get_excl(nvbo->bo.resv);
			fence = nouveau_fence_ref((struct nouveau_fence *)excl);
		}

		ttm_bo_unreserve(&nvbo->bo);
	}
	drm_gem_object_unreference_unlocked(gem);

	if (fence) {
		ret = nouveau_fence_wait(fence, true, no_wait);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

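/* DRM_NOUVEAU_GEM_CPU_FINI: a no-op here; synchronisation with the GPU
 * is handled through fences attached at submission time. */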
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

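/* DRM_NOUVEAU_GEM_INFO: look up a handle and report its current state. */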
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}