/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict, bool no_wait_gpu,
                    struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret) {
                        /* if we fail here don't nuke the mm node
                         * as the bo still owns it */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret) {
                        /* failing here, means keep old copy as-is */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
        ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
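
/*
 * Illustrative sketch, not part of the original file: a driver's ->move()
 * callback would typically attempt an accelerated blit first and fall back
 * to ttm_bo_move_memcpy() when no copy engine is usable. The helpers
 * my_driver_copy_possible() and my_driver_copy() are hypothetical names.
 *
 *      static int my_driver_bo_move(struct ttm_buffer_object *bo, bool evict,
 *                                   bool interruptible, bool no_wait_gpu,
 *                                   struct ttm_mem_reg *new_mem)
 *      {
 *              if (my_driver_copy_possible(bo, new_mem))
 *                      return my_driver_copy(bo, evict, no_wait_gpu, new_mem);
 *
 *              return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *      }
 */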
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        init_waitqueue_head(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);

        spin_lock(&bdev->fence_lock);
        if (bo->sync_obj)
                fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        else
                fbo->sync_obj = NULL;
        spin_unlock(&bdev->fence_lock);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;

        *new_obj = fbo;
        return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
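
/*
 * Illustrative sketch, not part of the original file: typical CPU access to a
 * reserved buffer object through the kmap interface. "bo", "data" and "len"
 * are assumed to be provided by the caller and are hypothetical here.
 *
 *      struct ttm_bo_kmap_obj map;
 *      bool is_iomem;
 *      int ret;
 *
 *      ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *      if (ret)
 *              return ret;
 *      memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), data, len);
 *      ttm_bo_kunmap(&map);
 */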
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        spin_lock(&bdev->fence_lock);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
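
/*
 * Illustrative sketch, not part of the original file: after emitting the blit
 * on its engine, a driver passes the resulting sync object to
 * ttm_bo_move_accel_cleanup() so the old placement is only released once the
 * copy has completed. my_driver_emit_copy() and "fence" are hypothetical.
 *
 *      ret = my_driver_emit_copy(bo, old_mem, new_mem, &fence);
 *      if (ret)
 *              return ret;
 *
 *      return ttm_bo_move_accel_cleanup(bo, (void *)fence, evict,
 *                                       no_wait_gpu, new_mem);
 */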