/*
 * Copyright (C) 2008 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#define nouveau_gem_pushbuf_sync(chan) 0
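
/* GEM object lifetime hooks: a GEM object here is a thin wrapper around a
 * TTM-backed nouveau_bo, created in nouveau_gem_new() below and freed via
 * the ->del hook when the last GEM reference goes away.
 */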
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (!nvbo)
		return;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}
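
/* Allocate a nouveau_bo and wrap it in a GEM object. The GEM object's shmem
 * file doubles as TTM's swap backing store, and driver_private links the
 * GEM object back to the nouveau_bo.
 */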
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
		int size, int align, uint32_t flags, uint32_t tile_mode,
		uint32_t tile_flags, bool no_vm, bool mappable,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
			     tile_flags, no_vm, mappable, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}
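
/* Fill the userspace info struct from the bo's current state: the reported
 * domain reflects where TTM has the buffer placed right now, and map_handle
 * is only valid for buffers created as CPU-mappable.
 */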
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->offset = nvbo->bo.offset;
	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}
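
/* DRM_IOCTL_NOUVEAU_GEM_NEW: translate the requested NOUVEAU_GEM_DOMAIN_*
 * mask into TTM placement flags, allocate the buffer, and return a handle
 * plus its initial placement info to userspace.
 */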
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	struct nouveau_channel *chan = NULL;
	uint32_t flags = 0;
	int ret = 0;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	if (req->channel_hint) {
		chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
		if (IS_ERR(chan))
			return PTR_ERR(chan);
	}

	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
			      req->info.tile_mode, req->info.tile_flags, false,
			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
			      &nvbo);
	if (chan)
		nouveau_channel_put(&chan);
	if (ret)
		return ret;

	ret = nouveau_gem_info(nvbo->gem, &req->info);
	if (ret)
		goto out;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);

out:
	return ret;
}
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}
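
/* Buffers being validated are bucketed by which domains they may live in,
 * so each list can be validated (and on error unwound) as a group.
 */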
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}
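
/* Reserve every buffer on the pushbuf list. A global validate_sequence
 * ticket orders competing reservers; on -EAGAIN we back out everything,
 * wait for the contended bo, and retry the whole list to avoid deadlock.
 */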
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			validate_fini(op, NULL);
			drm_gem_object_unreference_unlocked(gem);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN))
				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
			drm_gem_object_unreference_unlocked(gem);
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
	}

	return 0;
}
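
/* Validate each reserved buffer: sync with its previous user, move it to
 * the chosen placement, and refresh userspace's presumed offset/domain.
 * Returns the number of buffers whose presumed state went stale (and so
 * still need relocations applied), or a negative error code.
 */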
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
		(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
		ret = nouveau_bo_validate(nvbo, true, false, false);
		nvbo->channel = NULL;
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail post-validate sync\n");
			return ret;
		}

		if (nvbo->bo.offset == b->presumed.offset &&
		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
			continue;

		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
		else
			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		b->presumed.offset = nvbo->bo.offset;
		b->presumed.valid = 0;
		relocs++;

		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
				     &b->presumed, sizeof(b->presumed)))
			return -EFAULT;
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}
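
/* Copy a userspace array into a freshly allocated kernel buffer. Callers
 * bound nmemb via the NOUVEAU_GEM_MAX_* limits before calling, keeping
 * nmemb * size from overflowing.
 */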
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}
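
/* Apply relocations in the CPU-mapped pushbuf: for each reloc entry, patch
 * in the low or high 32 bits of the target bo's presumed offset (optionally
 * OR-ing in a placement-dependent value) once the bo is idle.
 */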
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index > req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}
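
/* DRM_IOCTL_NOUVEAU_GEM_PUSHBUF: the main submission path. Copy in the
 * push/buffer/reloc arrays, validate and fence the buffer list, apply any
 * stale relocations, then hand the push buffers to the channel's DMA ring.
 */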
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	chan = nouveau_channel_get(dev, file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push)) {
		nouveau_channel_put(&chan);
		return PTR_ERR(push);
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		nouveau_channel_put(&chan);
		return PTR_ERR(bo);
	}

	/* Mark push buffers as being used on PFIFO; the validation code will
	 * then make sure that if a pushbuf bo moves, the move happens on the
	 * kernel channel, which in turn causes a sync before we try to
	 * submit the push buffer.
	 */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(dev, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}

		bo[push[i].bo_index].read_domains |= (1 << 31);
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}
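
	/* Three submission methods, by channel/chipset capability: an
	 * indirect-buffer ring where available, CALL-style submission on
	 * nv25 and up, and otherwise JUMPing through the pushbuf with a
	 * return jump patched in at the end of each push.
	 */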
	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (dev_priv->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (dev_priv->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			       (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	nouveau_channel_put(&chan);
	return ret;
}
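
/* Map NOUVEAU_GEM_DOMAIN_* bits to their TTM placement flag equivalents. */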
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
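
/* Nothing to do on the kernel side when userspace finishes CPU access. */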
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}