/*
 * Copyright (C) 2008 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

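/*
 * GEM interface for nouveau: buffer object creation and lifetime, per-client
 * virtual address mappings, and command submission (buffer validation,
 * relocation and pushbuf execution).
 */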
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (!nvbo)
		return;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;

	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}

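/*
 * Tear down a per-client mapping of a buffer object.  If the buffer still has
 * a sync object pending, the unmap and free are deferred to fence completion
 * via nouveau_fence_work(); otherwise they are done immediately.
 */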
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

	if (mapped) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		if (nvbo->bo.sync_obj)
			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	nouveau_fence_unref(&fence);
}

void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}

int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (nv_device(drm->device)->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->base.vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

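/*
 * Translate the domains requested by userspace into a TTM placement for the
 * buffer, preferring to leave it where it currently resides if that is still
 * one of the allowed domains.
 */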
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

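/*
 * Reserve every buffer on the pushbuf's validation list and sort each one
 * onto the vram/gart/both list matching its valid domains.  A reservation
 * conflict bumps the sequence number and retries via the TTM slowpath.
 */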
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;

	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN)) {
				sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      sequence);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_ERROR(cli, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	return 0;
}

static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

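/*
 * Validate each reserved buffer into an allowed placement, synchronising with
 * any existing fence before and after the move.  On pre-NV50 chips, buffers
 * that no longer match their presumed offset/domain have the new values
 * copied back to userspace, and the count of such buffers is returned so the
 * caller knows relocations must be applied.
 */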
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(cli, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail post-validate sync\n");
			return ret;
		}

		if (nv_device(drm->device)->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

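/* Copy an array of nmemb elements of the given size in from userspace. */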
static void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

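/*
 * Apply software relocations on the CPU, patching addresses into the target
 * buffers using the presumed offsets recorded during validation.  Relocations
 * against buffers whose presumed state is still valid are skipped.
 */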
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index > req->nr_buffers)) {
			NV_ERROR(cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
			NV_ERROR(cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

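/*
 * Main command submission ioctl: copy in the push/buffer/reloc arrays,
 * validate and (if needed) relocate the buffer list, then submit the pushes
 * using one of three paths (indirect buffers, call/return on >=nv25, or jump
 * on older chips) and fence the result.
 */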
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_ERROR(cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}