/*
 * Copyright 2007 Dave Airlied
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	pfb->tile.fini(pfb, i, tile);

	pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	    (!tile->fence || nouveau_fence_done(tile->fence)))

	spin_unlock(&drm->tile.lock);
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
	struct nouveau_drm *drm = nouveau_drm(dev);

	spin_lock(&drm->tile.lock);
	tile->fence = nouveau_fence_ref(fence);

	spin_unlock(&drm->tile.lock);

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);

		nv10_bo_put_tile_region(dev, tile, NULL);

	nv10_bo_update_tile_region(dev, found, addr, size,

nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->tile_mode) {
			if (device->info.chipset >= 0x40) {

				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x30) {

				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x20) {

				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x10) {

				*size = roundup(*size, 32 * nvbo->tile_mode);

		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);

	*size = roundup(*size, PAGE_SIZE);
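	/*
	 * Illustrative arithmetic (annotation, not original source): with
	 * tile_mode = 4 on a chipset >= 0x40, roundup(*size, 64 * 4) pads
	 * the size to a 256-byte multiple, while chipsets >= 0x10 use a
	 * factor of 32 * tile_mode = 128 bytes.  The page_shift path pads
	 * to the bo's page size (1 << page_shift) and raises *align to at
	 * least that value, and every path finally rounds up to PAGE_SIZE.
	 */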
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct nouveau_bo **pnvbo)
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;

	int type = ttm_bo_type_device;

	lpg_shift = drm->client.vm->vmm->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		NV_WARN(drm, "skipped size %x\n", (u32)size);

	type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);

	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.vm->vmm->lpg_shift;

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,

		/* ttm will call nouveau_bo_del_ttm if it fails.. */
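/*
 * Hedged usage sketch (annotation, not original source): a typical in-driver
 * caller allocates, pins and maps a buffer roughly like
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *				 0, 0x0000, ..., &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 *
 * The size, flags and tiling values are illustrative only, the elided middle
 * argument is kept elided, and error unwinding is omitted for brevity.
 */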
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
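	/*
	 * Worked example (annotation, not original source): with 256 MiB of
	 * VRAM and 4 KiB pages, vram_pages is 65536, so a small tiled ZETA
	 * (depth) buffer is constrained to pages [32768, ~0) in the upper
	 * half while other small tiled buffers get pages [0, 32768), keeping
	 * colour and depth on different halves of memory as described above.
	 */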
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,

	set_placement_range(nvbo, type);

nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);

	if (nvbo->pin_refcnt++)

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);

	switch (bo->mem.mem_type) {
		drm->gem.vram_available -= bo->mem.size;

		drm->gem.gart_available -= bo->mem.size;

	ttm_bo_unreserve(bo);
nouveau_bo_unpin(struct nouveau_bo *nvbo)
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);

	switch (bo->mem.mem_type) {
		drm->gem.vram_available += bo->mem.size;

		drm->gem.gart_available += bo->mem.size;

	ttm_bo_unreserve(bo);

nouveau_bo_map(struct nouveau_bo *nvbo)

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);

nouveau_bo_unmap(struct nouveau_bo *nvbo)

	ttm_bo_kunmap(&nvbo->kmap);

nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	return ioread16_native((void __force __iomem *)mem);

nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	iowrite16_native(val, (void __force __iomem *)mem);

nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	return ioread32_native((void __force __iomem *)mem);

nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	iowrite32_native(val, (void __force __iomem *)mem);
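/*
 * Hedged usage sketch (annotation, not original source): after a successful
 * nouveau_bo_map(), the rd/wr helpers above access the kmap'd buffer, e.g.
 *
 *	if (nouveau_bo_map(nvbo) == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *		val = nouveau_bo_rd32(nvbo, 0);
 *		nouveau_bo_unmap(nvbo);
 *	}
 *
 * Since mem is declared as a u16/u32 pointer, the index is naturally in
 * units of the access size rather than bytes.
 */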
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);

nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
	/* We'll do this from user space. */
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
	man->available_caching = TTM_PL_MASK_CACHING;
	man->default_caching = TTM_PL_FLAG_CACHED;

	man->flags = TTM_MEMTYPE_FLAG_FIXED |
		     TTM_MEMTYPE_FLAG_MAPPABLE;
	man->available_caching = TTM_PL_FLAG_UNCACHED |

	man->default_caching = TTM_PL_FLAG_WC;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		/* Some BARs do not support being ioremapped WC */
		if (nvkm_bar(&drm->device)->iomap_uncached) {
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;

		man->func = &nouveau_vram_manager;
		man->io_reserve_fastpath = false;
		man->use_io_reserve_lru = true;

		man->func = &ttm_bo_manager_func;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		man->func = &nouveau_gart_manager;

	if (drm->agp.stat != ENABLED)
		man->func = &nv04_gart_manager;

		man->func = &ttm_bo_manager_func;

	if (drm->agp.stat == ENABLED) {
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |

		man->default_caching = TTM_PL_FLAG_WC;

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
			     TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,

		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);

	*pl = nvbo->placement;

nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
	int ret = RING_SPACE(chan, 2);

	BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
	OUT_RING (chan, handle & 0x0000ffff);
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);

	BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
	OUT_RING (chan, upper_32_bits(node->vma[0].offset));
	OUT_RING (chan, lower_32_bits(node->vma[0].offset));
	OUT_RING (chan, upper_32_bits(node->vma[1].offset));
	OUT_RING (chan, lower_32_bits(node->vma[1].offset));
	OUT_RING (chan, PAGE_SIZE);
	OUT_RING (chan, PAGE_SIZE);
	OUT_RING (chan, PAGE_SIZE);
	OUT_RING (chan, new_mem->num_pages);
	BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);

nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
	int ret = RING_SPACE(chan, 2);

	BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
	OUT_RING (chan, handle);
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;

	page_count = new_mem->num_pages;

		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
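	/*
	 * Illustrative arithmetic (annotation, not original source): the
	 * copy is issued as lines of PAGE_SIZE bytes, at most 8191 lines
	 * per submission, so e.g. a 64 MiB bo (16384 pages of 4 KiB) is
	 * moved in two passes of 8191 and 8193 pages, with src_offset and
	 * dst_offset advanced by PAGE_SIZE * line_count between passes.
	 */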
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;

	page_count = new_mem->num_pages;

		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;

	page_count = new_mem->num_pages;

		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);

	BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
	OUT_RING (chan, upper_32_bits(node->vma[0].offset));
	OUT_RING (chan, lower_32_bits(node->vma[0].offset));
	OUT_RING (chan, upper_32_bits(node->vma[1].offset));
	OUT_RING (chan, lower_32_bits(node->vma[1].offset));
	OUT_RING (chan, 0x00000000 /* COPY */);
	OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);

nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);

	BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
	OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
	OUT_RING (chan, upper_32_bits(node->vma[0].offset));
	OUT_RING (chan, lower_32_bits(node->vma[0].offset));
	OUT_RING (chan, upper_32_bits(node->vma[1].offset));
	OUT_RING (chan, lower_32_bits(node->vma[1].offset));
	OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
	int ret = RING_SPACE(chan, 6);

	BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
	OUT_RING (chan, handle);
	BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
	OUT_RING (chan, chan->drm->ntfy.handle);
	OUT_RING (chan, chan->vram.handle);
	OUT_RING (chan, chan->vram.handle);
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;

		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));

		amount = min(length, (u64)(4 * 1024 * 1024));

		height = amount / stride;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);

			OUT_RING (chan, stride);
			OUT_RING (chan, height);

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);

			OUT_RING (chan, stride);
			OUT_RING (chan, height);

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);

		src_offset += amount;
		dst_offset += amount;
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
	int ret = RING_SPACE(chan, 4);

	BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
	OUT_RING (chan, handle);
	BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
	OUT_RING (chan, chan->drm->ntfy.handle);

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
	if (mem->mem_type == TTM_PL_TT)

	return chan->vram.handle;
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;

	ret = RING_SPACE(chan, 3);

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;

		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
	struct nouveau_mem *old_node = bo->mem.mm_node;
	struct nouveau_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;

	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[0]);

	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[1]);

		nouveau_vm_put(&old_node->vma[0]);

	nouveau_vm_map(&old_node->vma[0], old_node);
	nouveau_vm_map(&old_node->vma[1], new_node);
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
	struct nouveau_fence *fence;

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(bo->sync_obj, chan);

		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);

			ret = nouveau_fence_new(chan, false, &fence);

				ret = ttm_bo_move_accel_cleanup(bo, fence,

				nouveau_fence_unref(&fence);

	mutex_unlock(&cli->mutex);
nouveau_bo_move_init(struct nouveau_drm *drm)
	static const struct {

		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);

		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },

		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";

		struct nouveau_channel *chan;

		chan = drm->channel;

		ret = nvif_object_init(chan->object, NULL,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,

			ret = mthd->init(chan, drm->ttm.copy.handle);

				nvif_object_fini(&drm->ttm.copy);

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;

	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
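	/*
	 * Annotation (not original source): the _methods table is walked in
	 * order, so the newest copy class the device exposes is tried first
	 * before falling back through older M2MF classes; if no entry can
	 * be initialised, "name" keeps its "CPU" default and buffer moves
	 * fall back to TTM's memcpy path in nouveau_bo_move().
	 */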
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);

	ttm_bo_mem_put(bo, &tmp_mem);

nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);

	ttm_bo_mem_put(bo, &tmp_mem);
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
		    (new_mem->mem_type == TTM_PL_VRAM ||
		     nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
			nouveau_vm_map(vma, new_mem->mm_node);

			nouveau_vm_unmap(vma);
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	if (new_mem->mem_type != TTM_PL_VRAM)

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,

nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);

	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);

		new_mem->mm_node = NULL;

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);

			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);

	/* Fallback to software copy. */
	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
	spin_unlock(&bo->bdev->fence_lock);

	ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {

		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);

		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);

nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;

	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))

	switch (mem->mem_type) {

		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;

		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)

		/* fallthrough, tiled memory */

		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
		mem->bus.is_iomem = true;
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nouveau_bar *bar = nvkm_bar(&drm->device);

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,

			mem->bus.offset = node->bar_vma.offset;
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nvkm_bar(&drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)

	bar->unmap(bar, &node->bar_vma);
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvif_device *device = &drm->device;
	u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);

	/* make sure bo is in mappable vram */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
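	/*
	 * Annotation (not original source): on a CPU fault, bos outside VRAM
	 * are normally left alone; a tiled bo sitting in SYSTEM is first
	 * moved into TT.  For VRAM bos on pre-Tesla hardware that extend
	 * past the CPU-mappable BAR window, the placement is clamped to
	 * fpfn = 0, lpfn = mappable and the bo is validated back into the
	 * mappable range.
	 */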
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;

	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);

	pdev = nv_device_base(device);

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);

	r = ttm_pool_populate(ttm);

	for (i = 0; i < ttm->num_pages; i++) {

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,

		if (dma_mapping_error(pdev, addr)) {

				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;

			ttm_pool_unpopulate(ttm);

		ttm_dma->dma_address[i] = addr;
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;

	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);

	pdev = nv_device_base(device);

	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,

	ttm_pool_unpopulate(ttm);
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
	struct nouveau_fence *old_fence = NULL;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = new_fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);

nouveau_bo_fence_unref(void **sync_obj)
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);

nouveau_bo_fence_ref(void *sync_obj)
	return nouveau_fence_ref(sync_obj);

nouveau_bo_fence_signalled(void *sync_obj)
	return nouveau_fence_done(sync_obj);

nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
	return nouveau_fence_wait(sync_obj, lazy, intr);

nouveau_bo_fence_flush(void *sync_obj)
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {

nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);

	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);

nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)

	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
		nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	list_del(&vma->head);