/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>

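/*
 * TTM "destroy" callback: runs when the last reference to the embedded
 * ttm_buffer_object is dropped, so this is where the nouveau_bo wrapper
 * itself is freed.
 */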
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

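/*
 * Round a buffer object's alignment and size up to the constraints
 * imposed by the chipset's tiling hardware.
 */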
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size. Align the size to the
	 * appropriate boundaries. This does imply that sizes are rounded up
	 * 3-7 pages, so be aware of this and do not waste memory by allocating
	 * many small buffers.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = dev_priv->vram_size >> 15;
		int i;

		if (is_power_of_2(block_size)) {
			for (i = 1; i < 10; i++) {
				*align = 12 * i * block_size;
				if (!(*align % 65536))
					break;
			}
		} else {
			for (i = 1; i < 10; i++) {
				*align = 8 * i * block_size;
				if (!(*align % 65536))
					break;
			}
		}
		*size = roundup(*size, *align);
	} else if (tile_mode) {
		if (dev_priv->chipset >= 0x40) {
			*align = 65536;
			*size = roundup(*size, 64 * tile_mode);
		} else if (dev_priv->chipset >= 0x30) {
			*align = 32768;
			*size = roundup(*size, 64 * tile_mode);
		} else if (dev_priv->chipset >= 0x20) {
			*align = 16384;
			*size = roundup(*size, 64 * tile_mode);
		} else if (dev_priv->chipset >= 0x10) {
			*align = 16384;
			*size = roundup(*size, 32 * tile_mode);
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}

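/*
 * Allocate and initialise a new buffer object.  On success *pnvbo points
 * at the new object; on failure TTM has already destroyed the embedded
 * ttm_buffer_object via nouveau_bo_del_ttm().
 *
 * An illustrative call (flag choices depend entirely on the caller):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0x0000, false, true, &nvbo);
 */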
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
			       &align, &size);
	align >>= PAGE_SHIFT;

	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	*pnvbo = nvbo;
	return 0;
}

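/*
 * Expand a mask of TTM placement flags into the array form TTM expects,
 * counting the entries through *n.
 */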
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

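/*
 * Rebuild the bo's preferred and busy placement lists for the given
 * placement mask, preserving the NO_EVICT flag while the bo is pinned.
 */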
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

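/*
 * Pin a bo into the requested memory type.  Pinning is refcounted; only
 * the first pin actually revalidates the bo with NO_EVICT set.
 */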
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (ret)
		nvbo->pin_refcnt--;
	return ret;
}

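/*
 * Drop a pin reference; once the last reference goes away the bo is
 * revalidated without NO_EVICT and its aperture accounting is returned.
 */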
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

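/*
 * Map the whole bo into kernel address space via ttm_bo_kmap(); the
 * mapping is cached in nvbo->kmap for the accessors below.
 */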
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

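/*
 * 16- and 32-bit accessors for a kmap'd bo.  ttm_kmap_obj_virtual()
 * reports whether the mapping is I/O memory, in which case the
 * ioreadNN/iowriteNN variants must be used instead of plain loads
 * and stores.
 */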
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		if (dev_priv->card_type == NV_50)
			man->gpu_offset = 0x40000000;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

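/*
 * Pick the DMA context object the copy should go through for a given
 * memory region: the channel's GART handle for TT placements, its VRAM
 * handle otherwise.
 */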
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (nvbo->no_vm) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

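/*
 * NV50 M2MF copy.  The transfer is chopped into chunks of at most 4 MiB,
 * each submitted as a 2D copy of `height' lines of `stride' bytes, with
 * the source/destination tiling layout programmed per chunk.
 */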
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_mem->start << PAGE_SHIFT;
	dst_offset = new_mem->start << PAGE_SHIFT;
	if (!nvbo->no_vm) {
		if (old_mem->mem_type == TTM_PL_VRAM)
			src_offset += dev_priv->vm_vram_base;
		else
			src_offset += dev_priv->vm_gart_base;

		if (new_mem->mem_type == TTM_PL_VRAM)
			dst_offset += dev_priv->vm_vram_base;
		else
			dst_offset += dev_priv->vm_gart_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

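/*
 * Pre-NV50 M2MF copy: offsets are linear within the ctxdma, so the
 * transfer is expressed as page-sized lines, at most 2047 lines per
 * submission.
 */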
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->no_vm) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

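/*
 * Moves to and from system memory cannot be done by the copy engine
 * directly, so they are staged through a GART (TT) placement: flipd
 * copies into TT and then flips the bo out to system memory, flips
 * does the reverse.
 */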
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size,
					      nouveau_bo_tile_layout(nvbo),
					      offset);
		if (ret)
			return ret;

	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
		*old_tile = new_tile;
	}
}

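/*
 * Main TTM move callback.  Strategy, in order: a "fake" copy when the
 * bo has no backing pages yet, a memcpy while the card is not up, then
 * the hardware-assisted paths above, with memcpy as the final fallback.
 */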
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
}

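/*
 * Attach a fence as the bo's sync object, dropping the reference held
 * on whatever fence was attached before.
 */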
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};