/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */
#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);

	kfree(nvbo);
}
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size. Overallocate memory to
	 * avoid corruption of other buffer objects.
	 */
	if (dev_priv->card_type == NV_50) {
		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			if (dev_priv->chipset >= 0xA0) {
				/* This is based on high end cards with 448 bits
				 * memory bus, could be different elsewhere. */
				*size += 6 * 28672;
				/* 8 * 28672 is the actual alignment requirement
				 * but we must also align to page size. */
				*align = 2 * 8 * 28672;
			} else if (dev_priv->chipset >= 0x90) {
				*size += 3 * 16384;
				*align = 12 * 16384;
			} else {
				*size += 3 * 8192;
				/* 12 * 8192 is the actual alignment requirement
				 * but we must also align to page size. */
				*align = 2 * 12 * 8192;
			}
			break;
		default:
			break;
		}
	} else {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = ALIGN(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = ALIGN(*size, 65536);
		*align = max(65536, *align);
	}
}
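
/*
 * A worked example of the alignment arithmetic above (illustrative note,
 * assuming a chipset >= 0xA0 board): the tiled cases request
 * *align = 2 * 8 * 28672 = 458752 bytes.  458752 = 7 * 65536, so the
 * doubled value is also a whole multiple of the 64 KiB minimum alignment
 * applied for NV_50 at the end of nouveau_bo_fixup_align(), whereas the
 * raw requirement 8 * 28672 = 229376 is not; this appears to be the
 * intent of the doubling.
 */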
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
	align >>= PAGE_SHIFT;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
	nouveau_bo_placement_set(nvbo, flags);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	nvbo->channel = NULL;
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	*pnvbo = nvbo;
	return 0;
}
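
/*
 * A minimal usage sketch (illustrative, not from the original file):
 * allocate a page-sized, CPU-mappable, untiled buffer in VRAM and map
 * it for CPU access.  Error handling is elided.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, NULL, PAGE_SIZE, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0x0000, false, true, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 */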
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
{
	int n = 0;

	if (memtype & TTM_PL_FLAG_VRAM)
		nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
	if (memtype & TTM_PL_FLAG_TT)
		nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	if (memtype & TTM_PL_FLAG_SYSTEM)
		nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
	nvbo->placement.placement = nvbo->placements;
	nvbo->placement.busy_placement = nvbo->placements;
	nvbo->placement.num_placement = n;
	nvbo->placement.num_busy_placement = n;

	if (nvbo->pin_refcnt) {
		while (n--)
			nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
	}
}
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, i;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype);
	for (i = 0; i < nvbo->placement.num_placement; i++)
		nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (ret)
		nvbo->pin_refcnt--;
	return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, i;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	for (i = 0; i < nvbo->placement.num_placement; i++)
		nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}
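
/*
 * Pinning sketch (illustrative, not from the original file): pin a
 * buffer into VRAM so it cannot be evicted while, e.g., scanout uses
 * it, then drop the pin when done.  Calls nest via pin_refcnt.
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	...
 *	nouveau_bo_unpin(nvbo);
 */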
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	ttm_bo_kunmap(&nvbo->kmap);
}
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
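
/*
 * Access sketch (illustrative, not from the original file): the
 * accessors above index in units of the access size, so 32-bit word i
 * lives at byte offset i * 4 of the kmapped buffer.
 *
 *	nouveau_bo_map(nvbo);
 *	nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *	val = nouveau_bo_rd32(nvbo, 0);
 *	nouveau_bo_unmap(nvbo);
 */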
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		man->io_addr = NULL;
		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > nouveau_mem_fb_amount(dev))
			man->io_size = nouveau_mem_fb_amount(dev);

		man->gpu_offset = dev_priv->vm_vram_base;
		break;
	case TTM_PL_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_addr = NULL;
		man->io_offset = dev_priv->gart_info.aper_base;
		man->io_size = dev_priv->gart_info.aper_size;
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
		break;
	}

	*pl = nvbo->placement;
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
					evict, no_wait, new_mem);
	nouveau_fence_unref((void *)&fence);
	return ret;
}
static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		      struct ttm_mem_reg *mem)
{
	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}
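
/*
 * Note on the copy below: nouveau_bo_move_m2mf() expresses the copy as
 * a 2D blit with line length and both pitches set to PAGE_SIZE, and
 * each burst transfers at most 2047 lines (pages).  For example, with
 * 4 KiB pages a 16 MiB object is 4096 pages and would be copied in
 * three bursts of 2047, 2047 and 2 pages.
 */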
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     int no_wait, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	uint32_t page_count;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->tile_flags || nvbo->no_vm)
		chan = dev_priv->channel;

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (chan != dev_priv->channel) {
		if (old_mem->mem_type == TTM_PL_TT)
			src_offset += dev_priv->vm_gart_base;
		else
			src_offset += dev_priv->vm_vram_base;

		if (new_mem->mem_type == TTM_PL_TT)
			dst_offset += dev_priv->vm_gart_base;
		else
			dst_offset += dev_priv->vm_vram_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

	if (dev_priv->card_type >= NV_50) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
		OUT_RING(chan, 1);
		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
		OUT_RING(chan, 1);
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		if (dev_priv->card_type >= NV_50) {
			ret = RING_SPACE(chan, 3);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
			OUT_RING(chan, upper_32_bits(src_offset));
			OUT_RING(chan, upper_32_bits(dst_offset));
		}
		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING(chan, lower_32_bits(src_offset));
		OUT_RING(chan, lower_32_bits(dst_offset));
		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING(chan, PAGE_SIZE); /* line_length */
		OUT_RING(chan, line_count);
		OUT_RING(chan, (1<<8)|(1<<0));
		OUT_RING(chan, 0);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING(chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
}
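
/*
 * Since the M2MF path above only accesses TTM_PL_{VRAM,TT} directly,
 * the two helpers below stage moves that involve TTM_PL_SYSTEM through
 * a temporary GART (TTM_PL_TT) placement: "flipd" copies out of VRAM
 * via GART before the final move to SYSTEM, and "flips" does the
 * reverse for SYSTEM to VRAM.
 */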
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
	if (ret)
		goto out;

out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->mm_node->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;
	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode);
	}

	return 0;
}
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		if (*old_tile)
			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

		*old_tile = new_tile;
	}
}
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Software copy if the card isn't up and running yet. */
	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
	    !dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		goto out;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
};