/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

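/*
 * Final GEM object teardown: detach any PRIME import, release the GEM
 * core state, clear gem->filp so nouveau_bo_del_ttm() can recognise the
 * object as already released, then drop the underlying TTM reference.
 */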
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);
}

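/*
 * Called for each client (drm_file) that opens a handle to this object:
 * look up the buffer's mapping in the client's VM, creating and
 * refcounting a nouveau_vma on first use.
 */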
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret)
			kfree(vma);
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

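/* deferred VMA teardown, run from a fence callback once the GPU is idle */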
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}

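/*
 * Unlink a VMA from the object.  If the buffer is still mapped and an
 * exclusive fence is outstanding, defer the unmap/free to a fence
 * callback via nouveau_fence_work(); otherwise tear it down immediately.
 */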
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct fence *fence = NULL;

	list_del(&vma->head);

	if (mapped)
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
}

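/*
 * Per-client handle close: drop the client's reference on its VMA and
 * unmap it when the last reference goes away.
 */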
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}

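/*
 * Allocate a new buffer object and initialise its embedded GEM object.
 * The NOUVEAU_GEM_DOMAIN_* flags are translated to TTM placement flags,
 * falling back to system memory when no placement (or an explicit CPU
 * domain) is requested.
 */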
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object.  We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return ret;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}

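/*
 * Fill in a drm_nouveau_gem_info reply for a buffer: domain, GPU offset
 * (the per-client virtual address on chips with per-client VM), size,
 * mmap handle and tiling state.
 */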
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

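/*
 * DRM_NOUVEAU_GEM_NEW: validate the requested tiling flags, allocate the
 * buffer, create a userspace handle for it, and report the allocation
 * back to the caller.  The allocation reference is dropped at the end;
 * the handle keeps the object alive.
 */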
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}

static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

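/*
 * Book-keeping for a pushbuf submission: buffers are sorted onto one of
 * three lists by the placement domains they may occupy, and the
 * ww_acquire_ctx ticket covers the whole multi-buffer reservation.
 */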
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
	struct ww_acquire_ctx ticket;
};

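/*
 * Walk a validate list backing out each buffer: attach the completion
 * fence (if any), drop any kmap left behind by relocations, unreserve
 * and drop the lookup reference.
 */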
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
		   struct ww_acquire_ctx *ticket)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}

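/* back out all three lists; validate_fini() additionally ends the ticket */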
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence, &op->ticket);
	validate_fini_list(&op->gart_list, fence, &op->ticket);
	validate_fini_list(&op->both_list, fence, &op->ticket);
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_no_ticket(op, fence);
	ww_acquire_fini(&op->ticket);
}

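/*
 * Look up and reserve every buffer on the submission.  Reservation uses
 * a ww_mutex ticket; on -EDEADLK the offending buffer is re-acquired via
 * the slowpath after everything else has been backed off, and the whole
 * list is retried.
 */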
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(error, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			validate_fini_no_ticket(op, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				ww_acquire_done(&op->ticket);
				ww_acquire_fini(&op->ticket);
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_PRINTK(error, cli, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	return 0;
}

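/*
 * Validate every buffer on one list: pick a placement, let TTM migrate
 * the buffer if needed, synchronise with outstanding fences, and (on
 * pre-Tesla chips, which have no per-client VM) push any changed
 * "presumed" offset back to userspace so relocations stay accurate.
 * Returns the number of buffers whose presumed state changed, or a
 * negative error code.
 */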
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(error, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					 &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

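/*
 * Reserve and validate the complete buffer list for a submission, one
 * placement list at a time, accumulating the number of relocations the
 * caller must still apply.
 */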
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

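/*
 * Copy a userspace array into a kernel buffer, trying kmalloc first and
 * falling back to vmalloc for large requests; u_free() frees either.
 */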
static inline void
u_free(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

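/*
 * Apply userspace-supplied relocations: for each entry whose presumed
 * buffer placement turned out to be wrong, patch the recorded offset
 * (low/high half, optionally OR'd with a domain-specific value) into
 * the pushbuf through a CPU mapping.
 */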
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		/* indices must be strictly below nr_buffers */
		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(error, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(error, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		if (ret) {
			NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

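/*
 * DRM_NOUVEAU_GEM_PUSHBUF: the main submission ioctl.  Copies in the
 * push/buffer/reloc arrays, validates the buffer list, applies any
 * relocations, then submits to the channel by whichever method the
 * hardware supports: an indirect buffer (ib_max), a CALL on >=nv25, or
 * a JUMP into the fragment on older chips.  A fence is emitted at the
 * end and handed to validate_fini() so the buffers stay busy until the
 * submission completes.
 */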
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(error, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

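/* translate NOUVEAU_GEM_DOMAIN_* flags to their TTM placement equivalents */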
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

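/*
 * DRM_NOUVEAU_GEM_CPU_PREP: make a buffer safe for CPU access.  Checks
 * whether the buffer is idle and, unless NOWAIT was requested, waits on
 * the exclusive fence from its reservation object.
 */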
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;
	struct nouveau_fence *fence = NULL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	ret = ttm_bo_reserve(&nvbo->bo, true, false, false, NULL);
	if (!ret) {
		ret = ttm_bo_wait(&nvbo->bo, true, true, true);
		if (!no_wait && ret) {
			struct fence *excl;

			excl = reservation_object_get_excl(nvbo->bo.resv);
			fence = nouveau_fence_ref((struct nouveau_fence *)excl);
		}

		ttm_bo_unreserve(&nvbo->bo);
	}
	drm_gem_object_unreference_unlocked(gem);

	if (fence) {
		ret = nouveau_fence_wait(fence, true, no_wait);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

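/*
 * DRM_NOUVEAU_GEM_CPU_FINI: nothing to undo here; CPU access is not
 * tracked after cpu_prep, so this is a no-op kept for the ABI.
 */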
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

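/* DRM_NOUVEAU_GEM_INFO: look up a handle and report buffer metadata */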
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}