/**************************************************************************
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err(" has_type: %d\n", man->has_type);
	pr_err(" use_type: %d\n", man->use_type);
	pr_err(" flags: 0x%08X\n", man->flags);
	pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err(" size: %llu\n", man->size);
	pr_err(" available_caching: 0x%08X\n", man->available_caching);
	pr_err(" default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
		pr_err(" placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));

static struct attribute *ttm_bo_global_attrs[] = {

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
static inline uint32_t ttm_bo_type_flags(unsigned type)

static void ttm_bo_release_list(struct kref *list_kref)
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
		return wait_event_interruptible(bo->event_queue,
						!ttm_bo_is_reserved(bo));
	wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!ttm_bo_is_reserved(bo));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */
int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
			 bool no_wait, bool use_sequence, uint32_t sequence)
	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		/*
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_sequence && bo->seq_valid) {
			/*
			 * We've already reserved this one.
			 */
			if (unlikely(sequence == bo->val_seq))
			/*
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			 */
			if (unlikely(sequence - bo->val_seq < (1 << 31)))

		ret = ttm_bo_wait_unreserved(bo, interruptible);

		bool wake_up = false;
		/*
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
		if (unlikely((bo->val_seq - sequence < (1 << 31))

		/*
		 * In the worst case with memory ordering these values can be
		 * seen in the wrong order. However since we call wake_up_all
		 * in that case, this will hopefully not pose a problem,
		 * and the worst case would only cause someone to accidentally
		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
		 * val_seq. However this would only happen if seq_valid was
		 * written before val_seq was, and just means some slightly
		 * increased cpu usage
		 */
		bo->val_seq = sequence;
		bo->seq_valid = true;
			wake_up_all(&bo->event_queue);
		bo->seq_valid = false;

EXPORT_SYMBOL(ttm_bo_reserve);
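
/*
 * Usage sketch (not part of the original file): how a driver would take and
 * drop a single-buffer reservation with the reserve API above. The helper
 * my_driver_touch_bo() is hypothetical; the ttm_bo_reserve() and
 * ttm_bo_unreserve() calls match the signatures defined in this file.
 */
#if 0	/* illustrative example only */
static int my_driver_touch_bo(struct ttm_buffer_object *bo)
{
	int ret;

	/* Interruptible wait, no sequence-based deadlock avoidance. */
	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		return ret;	/* e.g. -ERESTARTSYS */

	/* ... access or validate the buffer here ... */

	ttm_bo_unreserve(bo);
	return 0;
}
#endif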
static void ttm_bo_ref_bug(struct kref *list_kref)

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool no_wait, bool use_sequence, uint32_t sequence)
	struct ttm_bo_global *glob = bo->glob;

	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
	if (likely(ret == 0)) {
		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);
		ttm_bo_list_ref_sub(bo, put_count, true);
int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
				  bool interruptible, uint32_t sequence)
	bool wake_up = false;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		WARN_ON(bo->seq_valid && sequence == bo->val_seq);

		ret = ttm_bo_wait_unreserved(bo, interruptible);

	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)

	/*
	 * Wake up waiters that may need to recheck for deadlock,
	 * if we decreased the sequence number.
	 */
	bo->val_seq = sequence;
	bo->seq_valid = true;
		wake_up_all(&bo->event_queue);
int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
			    bool interruptible, uint32_t sequence)
	struct ttm_bo_global *glob = bo->glob;

	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);
		ttm_bo_list_ref_sub(bo, put_count, true);

EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_unreserve_locked(bo);
	spin_unlock(&glob->lru_lock);

EXPORT_SYMBOL(ttm_bo_unreserve);
/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	case ttm_bo_type_device:
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
		bo->ttm->sg = bo->sg;
		pr_err("Illegal buffer object type\n");
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (bdev->driver->move_notify) {
		struct ttm_mem_reg tmp_mem = *mem;
		bdev->driver->move_notify(bo, mem);

		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			pr_err("Can not flush read caches\n");

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;

	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
/*
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
	ttm_bo_mem_put(bo, &bo->mem);

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);

	/*
	 * Since the final reference to this bo may not be dropped by
	 * the current task we have to put a memory barrier here to make
	 * sure the changes done in this function are always visible.
	 *
	 * This function only needs protection against the final kref_put.
	 */
	smp_mb__before_atomic_dec();
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		spin_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		spin_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	spin_unlock(&bdev->fence_lock);

		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
/*
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);

		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		spin_unlock(&glob->lru_lock);

		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, false, true);
		spin_unlock(&bdev->fence_lock);

		spin_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 */
			spin_unlock(&glob->lru_lock);

		spin_unlock(&bdev->fence_lock);

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		spin_unlock(&glob->lru_lock);

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);
/*
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);

		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_bo_reserve_nolru(entry, false, false,
			spin_lock(&glob->lru_lock);

			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))

	spin_unlock(&glob->lru_lock);

	kref_put(&entry->list_kref, ttm_bo_release_list);
static void ttm_bo_delayed_workqueue(struct work_struct *work)
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
static void ttm_bo_release(struct kref *kref)
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	write_lock(&bdev->vm_lock);
	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);

	write_unlock(&bdev->vm_lock);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
	struct ttm_buffer_object *bo = *p_bo;

	kref_put(&bo->kref, ttm_bo_release);

EXPORT_SYMBOL(ttm_bo_unref);
int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
	return cancel_delayed_work_sync(&bdev->wq);

EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);

EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");

	BUG_ON(!ttm_bo_is_reserved(bo));

	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			ttm_bo_mem_space_debug(bo, &placement);

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

		spin_unlock(&glob->lru_lock);

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
		kref_put(&bo->list_kref, ttm_bo_release_list);

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

		(*man->func->put_node)(man, mem);

EXPORT_SYMBOL(ttm_bo_mem_put);
/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement,
				  struct ttm_mem_reg *mem,
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
		ret = ttm_mem_evict_first(bdev, mem_type,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
		if (mem->mm_node == NULL)
	mem->mem_type = mem_type;
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/*
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)

	if ((proposed_placement & man->available_caching) == 0)

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
/*
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						placement->placement[i],

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)

		if (man->has_type && man->use_type) {
			ret = (*man->func->get_node)(man, bo, placement, mem);

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
		man = &bdev->man[mem_type];

		if (!ttm_bo_mt_compatible(man,
					  placement->busy_placement[i],

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;

	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;

EXPORT_SYMBOL(ttm_bo_mem_space);
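
/*
 * Sketch (not from the original file): how a caller that already holds the
 * reservation could ask for space in a driver-defined placement and release
 * it again. The wrapper my_driver_find_vram_space() and the choice of
 * TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC are assumptions for the example;
 * ttm_bo_mem_space() and ttm_bo_mem_put() are the functions defined above.
 */
#if 0	/* illustrative example only */
static int my_driver_find_vram_space(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem)
{
	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
	struct ttm_placement placement = {
		.fpfn = 0,
		.lpfn = 0,
		.num_placement = 1,
		.placement = &flags,
		.num_busy_placement = 1,
		.busy_placement = &flags,
	};
	int ret;

	/* May evict other buffers from the VRAM LRU to make room. */
	ret = ttm_bo_mem_space(bo, &placement, mem, true, false);

	/* If the node ends up unused, hand it back with ttm_bo_mem_put(bo, mem). */
	return ret;
}
#endif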
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       struct ttm_placement *placement,
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(!ttm_bo_is_reserved(bo));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);

	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
		     TTM_PL_MASK_CACHING) &&
		    (placement->placement[i] & mem->placement &
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
	BUG_ON(!ttm_bo_is_reserved(bo));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
		    (placement->lpfn - placement->fpfn) < bo->num_pages)
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);

EXPORT_SYMBOL(ttm_bo_validate);
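
/*
 * Usage sketch (not part of the original file): the reserve / validate /
 * unreserve sequence a driver typically uses to (re)place an already
 * initialized buffer. my_driver_move_bo() and the placement argument are
 * hypothetical; ttm_bo_reserve(), ttm_bo_validate() and ttm_bo_unreserve()
 * are the functions defined in this file.
 */
#if 0	/* illustrative example only */
static int my_driver_move_bo(struct ttm_buffer_object *bo,
			     struct ttm_placement *new_placement)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	/* ttm_bo_validate() moves the buffer only if it is not already compatible. */
	ret = ttm_bo_validate(bo, new_placement, true, false);

	ttm_bo_unreserve(bo);
	return ret;
}
#endif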
int ttm_bo_check_placement(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement)
	BUG_ON((placement->fpfn || placement->lpfn) &&
	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		struct file *persistent_swap_storage,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
		pr_err("Out of kernel memory\n");

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		ttm_mem_global_free(mem_glob, acc_size);
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->glob = bdev->glob;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg) {
		ret = ttm_bo_setup_vm(bo);

	ret = ttm_bo_validate(bo, placement, interruptible, false);

	ttm_bo_unreserve(bo);

	ttm_bo_unreserve(bo);

EXPORT_SYMBOL(ttm_bo_init);
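
/*
 * Sketch (not from the original file): embedding a ttm_buffer_object in a
 * driver object and initializing it with the function above. The names
 * my_driver_bo, my_driver_bo_destroy() and my_driver_bo_create() and the
 * placement argument are assumptions for the example; ttm_bo_acc_size()
 * (defined below) and ttm_bo_init() are the functions in this file.
 */
#if 0	/* illustrative example only */
struct my_driver_bo {
	struct ttm_buffer_object base;
};

static void my_driver_bo_destroy(struct ttm_buffer_object *bo)
{
	kfree(container_of(bo, struct my_driver_bo, base));
}

static int my_driver_bo_create(struct ttm_bo_device *bdev, unsigned long size,
			       struct ttm_placement *placement,
			       struct my_driver_bo **p_dbo)
{
	struct my_driver_bo *dbo;
	size_t acc_size;
	int ret;

	dbo = kzalloc(sizeof(*dbo), GFP_KERNEL);
	if (!dbo)
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct my_driver_bo));

	/* On failure ttm_bo_init() drops the last reference, calling the destroy hook. */
	ret = ttm_bo_init(bdev, &dbo->base, size, ttm_bo_type_device,
			  placement, PAGE_SIZE >> PAGE_SHIFT, true, NULL,
			  acc_size, NULL, my_driver_bo_destroy);
	if (ret == 0)
		*p_dbo = dbo;
	return ret;
}
#endif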
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));

EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));

EXPORT_SYMBOL(ttm_bo_dma_acc_size);
int ttm_bo_create(struct ttm_bo_device *bdev,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  struct file *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
	struct ttm_buffer_object *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
	if (likely(ret == 0))

EXPORT_SYMBOL(ttm_bo_create);
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type, bool allow_errors)
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
				pr_err("Cleanup eviction failed\n");
		spin_lock(&glob->lru_lock);
	spin_unlock(&glob->lru_lock);
int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
	struct ttm_mem_type_manager *man;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",

	man->use_type = false;
	man->has_type = false;

		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);

EXPORT_SYMBOL(ttm_bo_clean_mm);
int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);

	return ttm_bo_force_list_clean(bdev, mem_type, true);

EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);

	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
	man->has_type = true;
	man->use_type = true;

	INIT_LIST_HEAD(&man->lru);

EXPORT_SYMBOL(ttm_bo_init_mm);
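
/*
 * Sketch (not from the original file): a driver bringing up its VRAM manager
 * after ttm_bo_device_init(). TTM_PL_VRAM is a standard placement type, but
 * the bdev/vram_size variables shown here are assumptions for the example;
 * ttm_bo_init_mm() is the function above and takes the managed size in pages.
 */
#if 0	/* illustrative example only */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return ret;
	}
#endif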
static void ttm_bo_global_kobj_release(struct kobject *kobj)
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
void ttm_bo_global_release(struct drm_global_reference *ref)
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);

EXPORT_SYMBOL(ttm_bo_global_release);
int ttm_bo_global_init(struct drm_global_reference *ref)
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);

	__free_page(glob->dummy_read_page);

EXPORT_SYMBOL(ttm_bo_global_init);
int ttm_bo_device_release(struct ttm_bo_device *bdev)
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				pr_err("DRM memory manager type %d is not clean\n",
			man->has_type = false;

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->need_dma32 = need_dma32;
	spin_lock_init(&bdev->fence_lock);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	ttm_bo_clean_mm(bdev, 0);

EXPORT_SYMBOL(ttm_bo_device_init);
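
/*
 * Sketch (not from the original file): a driver initializing its bo device
 * once the TTM global state has been referenced. my_drm_priv, my_ttm_driver
 * and DRM_FILE_PAGE_OFFSET are driver-side assumptions for the example; only
 * the system memory type is set up here, further types are added with
 * ttm_bo_init_mm().
 */
#if 0	/* illustrative example only */
	ret = ttm_bo_device_init(&my_drm_priv->bdev,
				 my_drm_priv->bo_global_ref.ref.object,
				 &my_ttm_driver, DRM_FILE_PAGE_OFFSET,
				 my_drm_priv->need_dma32);
	if (ret) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", ret);
		return ret;
	}
#endif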
/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)

		if (mem->placement & TTM_PL_FLAG_CACHED)
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
	ttm_mem_io_free_vm(bo);

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);

EXPORT_SYMBOL(ttm_bo_unmap_virtual);
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
/*
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
	struct ttm_bo_device *bdev = bo->bdev;

	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	write_unlock(&bdev->vm_lock);
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->sync_obj == NULL))

	while (bo->sync_obj) {
		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
		spin_lock(&bdev->fence_lock);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);

EXPORT_SYMBOL(ttm_bo_wait);
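
/*
 * Usage sketch (not part of the original file): ttm_bo_wait() expects
 * bdev->fence_lock to be held, as the callers in this file do. The wrapper
 * my_driver_wait_idle() is hypothetical.
 */
#if 0	/* illustrative example only */
static int my_driver_wait_idle(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	spin_lock(&bdev->fence_lock);
	/* lazy = false, interruptible = true, no_wait = false */
	ret = ttm_bo_wait(bo, false, true, false);
	spin_unlock(&bdev->fence_lock);
	return ret;
}
#endif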
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
	struct ttm_bo_device *bdev = bo->bdev;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bdev->fence_lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);

EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
	atomic_dec(&bo->cpu_writers);

EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
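
/*
 * Usage sketch (not part of the original file): bracketing CPU writes with
 * the grab/release pair above; grab reserves the bo, waits for idle and
 * raises bo->cpu_writers. my_driver_cpu_write() and my_driver_cpu_fill()
 * are hypothetical helpers.
 */
#if 0	/* illustrative example only */
static int my_driver_cpu_write(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_synccpu_write_grab(bo, false);
	if (unlikely(ret != 0))
		return ret;

	my_driver_cpu_fill(bo);	/* e.g. write through a kernel mapping of the bo */

	ttm_bo_synccpu_write_release(bo);
	return 0;
}
#endif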
/*
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

		spin_unlock(&glob->lru_lock);

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/*
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
		if (unlikely(ret != 0))

	ttm_bo_unmap_virtual(bo);

	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);

	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)

EXPORT_SYMBOL(ttm_bo_swapout_all);