/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->glob->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->glob->lru_lock);
	}
	old_mem->mm_node = NULL;
}
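
/*
 * Move a buffer backed by a ttm_tt between placements without copying:
 * unbind from the old (non-system) placement, adjust the caching state
 * of the backing pages to match the new placement, and rebind there.
 * The system pages themselves keep the contents, so no data copy is
 * needed.
 */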
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
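
/*
 * Reserve and release the I/O resources backing a memory region.
 * Drivers that implement the io_mem_reserve/io_mem_free hooks are
 * called once per reservation; otherwise we fall back to the legacy
 * ttm_bo_pci_offset() path, where a memory type without
 * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP is already mapped through
 * man->io_addr.
 */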
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;

	if (bdev->driver->io_mem_reserve) {
		if (!mem->bus.io_reserved) {
			mem->bus.io_reserved = true;
			ret = bdev->driver->io_mem_reserve(bdev, mem);
			if (unlikely(ret != 0))
				return ret;
		}
	} else {
		ret = ttm_bo_pci_offset(bdev, mem, &mem->bus.base,
					&mem->bus.offset, &mem->bus.size);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.addr = NULL;
		if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
			mem->bus.addr = (void *)(((u8 *)man->io_addr) +
						 mem->bus.offset);
		mem->bus.is_iomem = (mem->bus.size > 0) ? 1 : 0;
	}
	return 0;
}

void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	if (bdev->driver->io_mem_reserve) {
		if (mem->bus.io_reserved) {
			mem->bus.io_reserved = false;
			bdev->driver->io_mem_free(bdev, mem);
		}
	}
}
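
/*
 * Kernel-map an I/O region: reuse a pre-existing mapping (mem->bus.addr)
 * when one is available, otherwise ioremap it write-combined or uncached
 * according to the placement flags. *virtual is left NULL for regions
 * that are not I/O memory.
 */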
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset,
					  mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
					       mem->bus.size);
		if (!addr) {
			ttm_mem_io_free(bdev, mem);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP ||
			mem->bus.addr == NULL))
		iounmap(virtual);
	ttm_mem_io_free(bdev, mem);
}
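
/*
 * Per-page copy helpers for the memcpy move path below: a pure
 * iowrite32/ioread32 copy when both sides are I/O memory, and a copy
 * through a temporary kernel mapping (kmap_atomic_prot on x86, vmap or
 * kmap elsewhere depending on the required page protection) when one
 * side is backed by ttm_tt pages.
 */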
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}
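
/*
 * Generic memcpy-based move: map both ends (via ioremap or the ttm_tt
 * pages) and copy page by page. If source and destination overlap
 * within the same memory type, copy backwards (dir == -1, starting at
 * the last page) so that data is not overwritten before it is read.
 */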
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}
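
/*
 * Translate TTM caching flags into a page protection value for CPU
 * mappings, using each architecture's write-combining / uncached
 * primitives where available.
 */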
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base +
						  bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base +
						       bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping;
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
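
/*
 * Map part of a buffer object into kernel address space. I/O memory is
 * mapped through ttm_bo_ioremap(); system memory goes through kmap() or
 * vmap() in ttm_bo_kmap_ttm(). The mapping type is recorded in @map so
 * that ttm_bo_kunmap() can undo it.
 */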
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
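
/*
 * Finish an accelerated (GPU) move. On eviction, wait for the copy
 * fence before releasing the old placement. Otherwise hand the old
 * placement to a "ghost" buffer object created by
 * ttm_buffer_object_transfer(); the ghost is released once the GPU
 * copy has completed, so ordinary moves stay pipelined.
 */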
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);