/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */
#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>
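/*
 * TTM destroy callback for a nouveau_bo: drops any CPU mapping, warns if a
 * GEM object is still attached, expires the tiling region (if any) and
 * frees the wrapper structure.
 */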
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	kfree(nvbo);
}
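/*
 * Grow the requested alignment and size so the buffer satisfies both the
 * chipset's tiling constraints and the page size; see the in-function
 * comment for the NV50 periodic-structure case.
 */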
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size. Align the size to the
	 * appropriate boundaries. This does imply that sizes are rounded up
	 * 3-7 pages, so be aware of this and do not waste memory by allocating
	 * many small buffers.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = dev_priv->vram_size >> 15;
		int i;

		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			if (is_power_of_2(block_size)) {
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			*size = roundup(*size, *align);
			break;
		default:
			break;
		}
	} else {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}
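/*
 * Allocate and initialise a nouveau_bo and hand it to TTM.  The channel
 * pointer is stashed in nvbo->channel only around ttm_bo_init(), presumably
 * so the initial validate can take the M2MF copy path on that channel.
 */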
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
	align >>= PAGE_SHIFT;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	*pnvbo = nvbo;
	return 0;
}
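/*
 * Expand a mask of TTM placement flags into the per-domain placement array
 * TTM expects, ordered VRAM, then TT, then system memory.
 */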
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);
}
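/*
 * Pin a buffer into the domain(s) in memtype, refcounted.  Only the first
 * pin actually revalidates; later calls just bump pin_refcnt, and pinning
 * into a different domain than the current one is rejected.
 */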
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (ret)
		nvbo->pin_refcnt--;
	return ret;
}
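/*
 * Drop a pin reference; when the last one goes away, revalidate with the
 * current placement so TTM may evict the buffer again, and return its space
 * to the relevant aperture accounting.
 */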
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}
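/* Kernel CPU mapping of the whole buffer via TTM's kmap machinery. */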
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	ttm_bo_kunmap(&nvbo->kmap);
}
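/*
 * 16- and 32-bit accessors for a mapped buffer.  ttm_kmap_obj_virtual()
 * reports whether the mapping is I/O memory; if so, go through the
 * native-endian MMIO helpers instead of dereferencing the pointer.
 */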
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
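/*
 * Create the TTM backend that binds/unbinds GART pages, chosen by the GART
 * type probed at load time (AGP bridge vs. on-chip SGDMA).
 */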
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}
static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}
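/*
 * Describe each TTM memory type to the core: mappability, allowed caching
 * modes, and the GPU virtual base each domain is mapped at.
 */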
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->gpu_offset = dev_priv->vm_vram_base;
		break;
	case TTM_PL_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
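/*
 * Eviction policy: VRAM contents are first pushed out to GART (with system
 * memory as the busy fallback), anything else goes straight to system
 * memory.
 */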
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
					evict || (nvbo->channel &&
						  nvbo->channel != chan),
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref((void *)&fence);
	return ret;
}
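/*
 * Pick the DMA context object a channel should use to address the given
 * memory region: the kernel channel has dedicated VRAM/GART ctxdmas, user
 * channels use their own handles.
 */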
static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		      struct ttm_mem_reg *mem)
{
	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}
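/*
 * Copy a buffer with the M2MF engine, at most 2047 page-sized lines per
 * pass.  When submitting on a channel other than the kernel's, offsets are
 * rewritten into that channel's virtual address space.
 */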
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	int page_count;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->tile_flags || nvbo->no_vm)
		chan = dev_priv->channel;

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (chan != dev_priv->channel) {
		if (old_mem->mem_type == TTM_PL_TT)
			src_offset += dev_priv->vm_gart_base;
		else
			src_offset += dev_priv->vm_vram_base;

		if (new_mem->mem_type == TTM_PL_TT)
			dst_offset += dev_priv->vm_gart_base;
		else
			dst_offset += dev_priv->vm_vram_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

	if (dev_priv->card_type >= NV_50) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
		OUT_RING(chan, 1);
		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
		OUT_RING(chan, 1);
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		if (dev_priv->card_type >= NV_50) {
			ret = RING_SPACE(chan, 3);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
			OUT_RING(chan, upper_32_bits(src_offset));
			OUT_RING(chan, upper_32_bits(dst_offset));
		}
		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING(chan, lower_32_bits(src_offset));
		OUT_RING(chan, lower_32_bits(dst_offset));
		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING(chan, PAGE_SIZE); /* line_length */
		OUT_RING(chan, line_count);
		OUT_RING(chan, (1<<8)|(1<<0));
		OUT_RING(chan, 0);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING(chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
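/*
 * VRAM<->system moves cannot be done by M2MF directly, so they bounce
 * through a temporary GART allocation: flipd copies out of VRAM via the
 * temporary, flips copies into VRAM via it.
 */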
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}
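/*
 * Tiling bookkeeping around a move: vm_bind sets up tiling/VM state for the
 * new VRAM location before the copy, vm_cleanup retires the old tiling
 * region once the move (and the fence protecting it) has completed.
 */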
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->mm_node->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;
	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode);
	}

	return 0;
}
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		if (*old_tile)
			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

		*old_tile = new_tile;
	}
}
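/*
 * Top-level move hook.  Tries, in order: a plain memcpy while the card is
 * not yet up, a zero-copy "fake" move for unpopulated system buffers, the
 * hardware M2MF paths, and finally a software copy as the last resort.
 */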
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
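/*
 * Fill in mem->bus so TTM can CPU-map the region: AGP GART and VRAM (PCI
 * BAR 1) are I/O memory, SGDMA-backed GART and system memory are not.
 */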
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}
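/*
 * The driver vtable handed to ttm_bo_device_init(); the sync_obj_* hooks
 * route TTM's fencing through nouveau fences.
 */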
struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};