drm/radeon/kms: fix warning about cur_placement being uninitialised.
drivers/gpu/drm/ttm/ttm_bo.c
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30/* Notes:
31 *
 32 * We store a bo pointer in the drm_mm_node struct so we know which bo owns
 33 * a specific node. There is no protection on the pointer, so to keep things
 34 * sane you must only access it while holding the global lru lock, and you
 35 * must reset the pointer to NULL whenever you free a node.
 36 *
37 */
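/*
 * A sketch of the access pattern this implies (it is what ttm_bo_mem_space()
 * and the free paths such as ttm_bo_cleanup_refs() actually do): the owner is
 * recorded with "node->private = bo" and cleared before drm_mm_put_block(),
 * and a reader must hold the lock:
 *
 *	spin_lock(&glob->lru_lock);
 *	bo = node->private;	(NULL once the node has been freed)
 *	...
 *	spin_unlock(&glob->lru_lock);
 */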
38
39#include "ttm/ttm_module.h"
40#include "ttm/ttm_bo_driver.h"
41#include "ttm/ttm_placement.h"
42#include <linux/jiffies.h>
43#include <linux/slab.h>
44#include <linux/sched.h>
45#include <linux/mm.h>
46#include <linux/file.h>
47#include <linux/module.h>
48
49#define TTM_ASSERT_LOCKED(param)
50#define TTM_DEBUG(fmt, arg...)
51#define TTM_BO_HASH_ORDER 13
52
53static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
 54static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
55static void ttm_bo_global_kobj_release(struct kobject *kobj);
56
57static struct attribute ttm_bo_count = {
58 .name = "bo_count",
59 .mode = S_IRUGO
60};
61
62static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
63{
64 int i;
65
66 for (i = 0; i <= TTM_PL_PRIV5; i++)
67 if (flags & (1 << i)) {
68 *mem_type = i;
69 return 0;
70 }
71 return -EINVAL;
72}
73
74static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
75 struct ttm_mem_type_manager *man)
76{
77 printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
78 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
79 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
80 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
81 printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
82 printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
83 printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
84 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
85 man->available_caching);
86 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
87 man->default_caching);
88 spin_lock(&glob->lru_lock);
89 drm_mm_debug_table(&man->manager, TTM_PFX);
90 spin_unlock(&glob->lru_lock);
91}
92
93static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
94 struct ttm_placement *placement)
95{
96 struct ttm_bo_device *bdev = bo->bdev;
97 struct ttm_bo_global *glob = bo->glob;
98 struct ttm_mem_type_manager *man;
99 int i, ret, mem_type;
100
101 printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
102 bo, bo->mem.num_pages, bo->mem.size >> 10,
103 bo->mem.size >> 20);
104 for (i = 0; i < placement->num_placement; i++) {
105 ret = ttm_mem_type_from_flags(placement->placement[i],
106 &mem_type);
107 if (ret)
108 return;
109 man = &bdev->man[mem_type];
110 printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
111 i, placement->placement[i], mem_type);
112 ttm_mem_type_manager_debug(glob, man);
113 }
114}
115
116static ssize_t ttm_bo_global_show(struct kobject *kobj,
117 struct attribute *attr,
118 char *buffer)
119{
120 struct ttm_bo_global *glob =
121 container_of(kobj, struct ttm_bo_global, kobj);
122
123 return snprintf(buffer, PAGE_SIZE, "%lu\n",
124 (unsigned long) atomic_read(&glob->bo_count));
125}
126
127static struct attribute *ttm_bo_global_attrs[] = {
128 &ttm_bo_count,
129 NULL
130};
131
132static struct sysfs_ops ttm_bo_global_ops = {
133 .show = &ttm_bo_global_show
134};
135
136static struct kobj_type ttm_bo_glob_kobj_type = {
137 .release = &ttm_bo_global_kobj_release,
138 .sysfs_ops = &ttm_bo_global_ops,
139 .default_attrs = ttm_bo_global_attrs
140};
141
142
143static inline uint32_t ttm_bo_type_flags(unsigned type)
144{
145 return 1 << (type);
146}
147
148static void ttm_bo_release_list(struct kref *list_kref)
149{
150 struct ttm_buffer_object *bo =
151 container_of(list_kref, struct ttm_buffer_object, list_kref);
152 struct ttm_bo_device *bdev = bo->bdev;
153
154 BUG_ON(atomic_read(&bo->list_kref.refcount));
155 BUG_ON(atomic_read(&bo->kref.refcount));
156 BUG_ON(atomic_read(&bo->cpu_writers));
157 BUG_ON(bo->sync_obj != NULL);
158 BUG_ON(bo->mem.mm_node != NULL);
159 BUG_ON(!list_empty(&bo->lru));
160 BUG_ON(!list_empty(&bo->ddestroy));
161
162 if (bo->ttm)
163 ttm_tt_destroy(bo->ttm);
 164 atomic_dec(&bo->glob->bo_count);
165 if (bo->destroy)
166 bo->destroy(bo);
167 else {
 168 ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
169 kfree(bo);
170 }
171}
172
173int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
174{
175
176 if (interruptible) {
177 int ret = 0;
178
179 ret = wait_event_interruptible(bo->event_queue,
180 atomic_read(&bo->reserved) == 0);
181 if (unlikely(ret != 0))
 182 return ret;
183 } else {
184 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
185 }
186 return 0;
187}
188
189static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
190{
191 struct ttm_bo_device *bdev = bo->bdev;
192 struct ttm_mem_type_manager *man;
193
194 BUG_ON(!atomic_read(&bo->reserved));
195
196 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
197
198 BUG_ON(!list_empty(&bo->lru));
199
200 man = &bdev->man[bo->mem.mem_type];
201 list_add_tail(&bo->lru, &man->lru);
202 kref_get(&bo->list_kref);
203
204 if (bo->ttm != NULL) {
 205 list_add_tail(&bo->swap, &bo->glob->swap_lru);
206 kref_get(&bo->list_kref);
207 }
208 }
209}
210
211/**
212 * Call with the lru_lock held.
213 */
214
215static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
216{
217 int put_count = 0;
218
219 if (!list_empty(&bo->swap)) {
220 list_del_init(&bo->swap);
221 ++put_count;
222 }
223 if (!list_empty(&bo->lru)) {
224 list_del_init(&bo->lru);
225 ++put_count;
226 }
227
228 /*
229 * TODO: Add a driver hook to delete from
230 * driver-specific LRU's here.
231 */
232
233 return put_count;
234}
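/*
 * Callers use the returned count to drop the corresponding list_kref
 * references only after lru_lock has been released, as ttm_bo_reserve()
 * below does:
 *
 *	spin_lock(&glob->lru_lock);
 *	put_count = ttm_bo_del_from_lru(bo);
 *	spin_unlock(&glob->lru_lock);
 *	while (put_count--)
 *		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 */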
235
236int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
237 bool interruptible,
238 bool no_wait, bool use_sequence, uint32_t sequence)
239{
 240 struct ttm_bo_global *glob = bo->glob;
241 int ret;
242
243 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
244 if (use_sequence && bo->seq_valid &&
245 (sequence - bo->val_seq < (1 << 31))) {
246 return -EAGAIN;
247 }
248
249 if (no_wait)
250 return -EBUSY;
251
 252 spin_unlock(&glob->lru_lock);
 253 ret = ttm_bo_wait_unreserved(bo, interruptible);
 254 spin_lock(&glob->lru_lock);
255
256 if (unlikely(ret))
257 return ret;
258 }
259
260 if (use_sequence) {
261 bo->val_seq = sequence;
262 bo->seq_valid = true;
263 } else {
264 bo->seq_valid = false;
265 }
266
267 return 0;
268}
269EXPORT_SYMBOL(ttm_bo_reserve);
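/*
 * Note on the sequence handling in ttm_bo_reserve_locked() above: when
 * use_sequence is set and the buffer is already reserved under a valid
 * sequence that is not newer than ours (the wrap-safe comparison
 * "sequence - bo->val_seq < (1 << 31)"), -EAGAIN is returned instead of
 * blocking, so a caller reserving a whole list of buffers under a single
 * sequence number can back off and retry without deadlocking against a
 * concurrent multi-buffer reservation.
 */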
270
271static void ttm_bo_ref_bug(struct kref *list_kref)
272{
273 BUG();
274}
275
276int ttm_bo_reserve(struct ttm_buffer_object *bo,
277 bool interruptible,
278 bool no_wait, bool use_sequence, uint32_t sequence)
279{
 280 struct ttm_bo_global *glob = bo->glob;
281 int put_count = 0;
282 int ret;
283
 284 spin_lock(&glob->lru_lock);
285 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
286 sequence);
287 if (likely(ret == 0))
288 put_count = ttm_bo_del_from_lru(bo);
 289 spin_unlock(&glob->lru_lock);
290
291 while (put_count--)
292 kref_put(&bo->list_kref, ttm_bo_ref_bug);
293
294 return ret;
295}
296
297void ttm_bo_unreserve(struct ttm_buffer_object *bo)
298{
 299 struct ttm_bo_global *glob = bo->glob;
 300
 301 spin_lock(&glob->lru_lock);
302 ttm_bo_add_to_lru(bo);
303 atomic_set(&bo->reserved, 0);
304 wake_up_all(&bo->event_queue);
 305 spin_unlock(&glob->lru_lock);
306}
307EXPORT_SYMBOL(ttm_bo_unreserve);
308
309/*
310 * Call bo->mutex locked.
311 */
312static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
313{
314 struct ttm_bo_device *bdev = bo->bdev;
 315 struct ttm_bo_global *glob = bo->glob;
316 int ret = 0;
317 uint32_t page_flags = 0;
318
319 TTM_ASSERT_LOCKED(&bo->mutex);
320 bo->ttm = NULL;
321
322 if (bdev->need_dma32)
323 page_flags |= TTM_PAGE_FLAG_DMA32;
324
325 switch (bo->type) {
326 case ttm_bo_type_device:
327 if (zero_alloc)
328 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
329 case ttm_bo_type_kernel:
330 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 331 page_flags, glob->dummy_read_page);
332 if (unlikely(bo->ttm == NULL))
333 ret = -ENOMEM;
334 break;
335 case ttm_bo_type_user:
336 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
337 page_flags | TTM_PAGE_FLAG_USER,
 338 glob->dummy_read_page);
 339 if (unlikely(bo->ttm == NULL)) {
 340 ret = -ENOMEM;
341 break;
342 }
343
344 ret = ttm_tt_set_user(bo->ttm, current,
345 bo->buffer_start, bo->num_pages);
346 if (unlikely(ret != 0))
347 ttm_tt_destroy(bo->ttm);
348 break;
349 default:
350 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
351 ret = -EINVAL;
352 break;
353 }
354
355 return ret;
356}
357
358static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
359 struct ttm_mem_reg *mem,
360 bool evict, bool interruptible, bool no_wait)
361{
362 struct ttm_bo_device *bdev = bo->bdev;
363 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
364 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
365 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
366 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
367 int ret = 0;
368
369 if (old_is_pci || new_is_pci ||
370 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
371 ttm_bo_unmap_virtual(bo);
372
373 /*
374 * Create and bind a ttm if required.
375 */
376
377 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
378 ret = ttm_bo_add_ttm(bo, false);
379 if (ret)
380 goto out_err;
381
382 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
383 if (ret)
 384 goto out_err;
385
386 if (mem->mem_type != TTM_PL_SYSTEM) {
387 ret = ttm_tt_bind(bo->ttm, mem);
388 if (ret)
389 goto out_err;
390 }
391
392 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
 393 bo->mem = *mem;
 394 mem->mm_node = NULL;
395 goto moved;
396 }
397
398 }
399
400 if (bdev->driver->move_notify)
401 bdev->driver->move_notify(bo, mem);
402
403 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
404 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
405 ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
406 else if (bdev->driver->move)
407 ret = bdev->driver->move(bo, evict, interruptible,
408 no_wait, mem);
409 else
410 ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
411
412 if (ret)
413 goto out_err;
414
415moved:
416 if (bo->evicted) {
417 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
418 if (ret)
419 printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
420 bo->evicted = false;
421 }
422
423 if (bo->mem.mm_node) {
424 spin_lock(&bo->lock);
425 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
426 bdev->man[bo->mem.mem_type].gpu_offset;
427 bo->cur_placement = bo->mem.placement;
428 spin_unlock(&bo->lock);
429 }
430
431 return 0;
432
433out_err:
434 new_man = &bdev->man[bo->mem.mem_type];
435 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
436 ttm_tt_unbind(bo->ttm);
437 ttm_tt_destroy(bo->ttm);
438 bo->ttm = NULL;
439 }
440
441 return ret;
442}
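/*
 * The move itself, above, is picked in this order: a ttm-to-ttm move when
 * neither the old nor the new memory type is FIXED (ttm_bo_move_ttm), then a
 * driver-provided accelerated hook (bdev->driver->move) when one exists, and
 * finally the generic CPU copy ttm_bo_move_memcpy() as a fallback.
 */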
443
444/**
445 * If bo idle, remove from delayed- and lru lists, and unref.
446 * If not idle, and already on delayed list, do nothing.
447 * If not idle, and not on delayed list, put on delayed list,
448 * up the list_kref and schedule a delayed list check.
449 */
450
451static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
452{
453 struct ttm_bo_device *bdev = bo->bdev;
 454 struct ttm_bo_global *glob = bo->glob;
455 struct ttm_bo_driver *driver = bdev->driver;
456 int ret;
457
458 spin_lock(&bo->lock);
459 (void) ttm_bo_wait(bo, false, false, !remove_all);
460
461 if (!bo->sync_obj) {
462 int put_count;
463
464 spin_unlock(&bo->lock);
465
 466 spin_lock(&glob->lru_lock);
467 ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
468 BUG_ON(ret);
469 if (bo->ttm)
470 ttm_tt_unbind(bo->ttm);
471
472 if (!list_empty(&bo->ddestroy)) {
473 list_del_init(&bo->ddestroy);
474 kref_put(&bo->list_kref, ttm_bo_ref_bug);
475 }
476 if (bo->mem.mm_node) {
 477 bo->mem.mm_node->private = NULL;
478 drm_mm_put_block(bo->mem.mm_node);
479 bo->mem.mm_node = NULL;
480 }
481 put_count = ttm_bo_del_from_lru(bo);
 482 spin_unlock(&glob->lru_lock);
483
484 atomic_set(&bo->reserved, 0);
485
486 while (put_count--)
487 kref_put(&bo->list_kref, ttm_bo_release_list);
488
489 return 0;
490 }
491
 492 spin_lock(&glob->lru_lock);
493 if (list_empty(&bo->ddestroy)) {
494 void *sync_obj = bo->sync_obj;
495 void *sync_obj_arg = bo->sync_obj_arg;
496
497 kref_get(&bo->list_kref);
498 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 499 spin_unlock(&glob->lru_lock);
500 spin_unlock(&bo->lock);
501
502 if (sync_obj)
503 driver->sync_obj_flush(sync_obj, sync_obj_arg);
504 schedule_delayed_work(&bdev->wq,
505 ((HZ / 100) < 1) ? 1 : HZ / 100);
506 ret = 0;
507
508 } else {
 509 spin_unlock(&glob->lru_lock);
510 spin_unlock(&bo->lock);
511 ret = -EBUSY;
512 }
513
514 return ret;
515}
516
517/**
518 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
519 * encountered buffers.
520 */
521
522static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
523{
 524 struct ttm_bo_global *glob = bdev->glob;
525 struct ttm_buffer_object *entry, *nentry;
526 struct list_head *list, *next;
527 int ret;
528
 529 spin_lock(&glob->lru_lock);
530 list_for_each_safe(list, next, &bdev->ddestroy) {
531 entry = list_entry(list, struct ttm_buffer_object, ddestroy);
532 nentry = NULL;
533
534 /*
535 * Protect the next list entry from destruction while we
536 * unlock the lru_lock.
537 */
538
539 if (next != &bdev->ddestroy) {
540 nentry = list_entry(next, struct ttm_buffer_object,
541 ddestroy);
542 kref_get(&nentry->list_kref);
543 }
544 kref_get(&entry->list_kref);
545
 546 spin_unlock(&glob->lru_lock);
547 ret = ttm_bo_cleanup_refs(entry, remove_all);
548 kref_put(&entry->list_kref, ttm_bo_release_list);
549
 550 spin_lock(&glob->lru_lock);
551 if (nentry) {
552 bool next_onlist = !list_empty(next);
 553 spin_unlock(&glob->lru_lock);
 554 kref_put(&nentry->list_kref, ttm_bo_release_list);
 555 spin_lock(&glob->lru_lock);
556 /*
557 * Someone might have raced us and removed the
558 * next entry from the list. We don't bother restarting
559 * list traversal.
560 */
561
562 if (!next_onlist)
563 break;
564 }
565 if (ret)
566 break;
567 }
568 ret = !list_empty(&bdev->ddestroy);
 569 spin_unlock(&glob->lru_lock);
570
571 return ret;
572}
573
574static void ttm_bo_delayed_workqueue(struct work_struct *work)
575{
576 struct ttm_bo_device *bdev =
577 container_of(work, struct ttm_bo_device, wq.work);
578
579 if (ttm_bo_delayed_delete(bdev, false)) {
580 schedule_delayed_work(&bdev->wq,
581 ((HZ / 100) < 1) ? 1 : HZ / 100);
582 }
583}
584
585static void ttm_bo_release(struct kref *kref)
586{
587 struct ttm_buffer_object *bo =
588 container_of(kref, struct ttm_buffer_object, kref);
589 struct ttm_bo_device *bdev = bo->bdev;
590
591 if (likely(bo->vm_node != NULL)) {
592 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
593 drm_mm_put_block(bo->vm_node);
594 bo->vm_node = NULL;
595 }
596 write_unlock(&bdev->vm_lock);
597 ttm_bo_cleanup_refs(bo, false);
598 kref_put(&bo->list_kref, ttm_bo_release_list);
599 write_lock(&bdev->vm_lock);
600}
601
602void ttm_bo_unref(struct ttm_buffer_object **p_bo)
603{
604 struct ttm_buffer_object *bo = *p_bo;
605 struct ttm_bo_device *bdev = bo->bdev;
606
607 *p_bo = NULL;
608 write_lock(&bdev->vm_lock);
609 kref_put(&bo->kref, ttm_bo_release);
610 write_unlock(&bdev->vm_lock);
611}
612EXPORT_SYMBOL(ttm_bo_unref);
613
614static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
615 bool no_wait)
 616{
 617 struct ttm_bo_device *bdev = bo->bdev;
 618 struct ttm_bo_global *glob = bo->glob;
 619 struct ttm_mem_reg evict_mem;
620 struct ttm_placement placement;
621 int ret = 0;
622
623 spin_lock(&bo->lock);
624 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
625 spin_unlock(&bo->lock);
626
 627 if (unlikely(ret != 0)) {
 628 if (ret != -ERESTARTSYS) {
629 printk(KERN_ERR TTM_PFX
630 "Failed to expire sync object before "
631 "buffer eviction.\n");
632 }
633 goto out;
634 }
635
636 BUG_ON(!atomic_read(&bo->reserved));
637
638 evict_mem = bo->mem;
639 evict_mem.mm_node = NULL;
640
641 placement.fpfn = 0;
642 placement.lpfn = 0;
643 placement.num_placement = 0;
644 placement.num_busy_placement = 0;
645 bdev->driver->evict_flags(bo, &placement);
646 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
647 no_wait);
 648 if (ret) {
 649 if (ret != -ERESTARTSYS) {
650 printk(KERN_ERR TTM_PFX
651 "Failed to find memory space for "
652 "buffer 0x%p eviction.\n", bo);
653 ttm_bo_mem_space_debug(bo, &placement);
654 }
655 goto out;
656 }
657
658 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
659 no_wait);
660 if (ret) {
 661 if (ret != -ERESTARTSYS)
 662 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
663 spin_lock(&glob->lru_lock);
664 if (evict_mem.mm_node) {
665 evict_mem.mm_node->private = NULL;
666 drm_mm_put_block(evict_mem.mm_node);
667 evict_mem.mm_node = NULL;
668 }
669 spin_unlock(&glob->lru_lock);
670 goto out;
671 }
672 bo->evicted = true;
673out:
674 return ret;
675}
676
677static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
678 uint32_t mem_type,
679 bool interruptible, bool no_wait)
680{
681 struct ttm_bo_global *glob = bdev->glob;
682 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
683 struct ttm_buffer_object *bo;
684 int ret, put_count = 0;
 685
 686 spin_lock(&glob->lru_lock);
687 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
688 kref_get(&bo->list_kref);
689 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
690 if (likely(ret == 0))
691 put_count = ttm_bo_del_from_lru(bo);
 692 spin_unlock(&glob->lru_lock);
693 if (unlikely(ret != 0))
694 return ret;
695 while (put_count--)
696 kref_put(&bo->list_kref, ttm_bo_ref_bug);
697 ret = ttm_bo_evict(bo, interruptible, no_wait);
698 ttm_bo_unreserve(bo);
699 kref_put(&bo->list_kref, ttm_bo_release_list);
700 return ret;
701}
702
703static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
704 struct ttm_mem_type_manager *man,
705 struct ttm_placement *placement,
706 struct ttm_mem_reg *mem,
707 struct drm_mm_node **node)
708{
709 struct ttm_bo_global *glob = bo->glob;
710 unsigned long lpfn;
711 int ret;
712
713 lpfn = placement->lpfn;
714 if (!lpfn)
715 lpfn = man->size;
716 *node = NULL;
717 do {
718 ret = drm_mm_pre_get(&man->manager);
719 if (unlikely(ret))
720 return ret;
721
722 spin_lock(&glob->lru_lock);
723 *node = drm_mm_search_free_in_range(&man->manager,
724 mem->num_pages, mem->page_alignment,
725 placement->fpfn, lpfn, 1);
726 if (unlikely(*node == NULL)) {
727 spin_unlock(&glob->lru_lock);
728 return 0;
729 }
730 *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
731 mem->page_alignment,
732 placement->fpfn,
733 lpfn);
734 spin_unlock(&glob->lru_lock);
735 } while (*node == NULL);
736 return 0;
737}
738
739/**
740 * Repeatedly evict memory from the LRU for @mem_type until we create enough
741 * space, or we've evicted everything and there isn't enough space.
742 */
743static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
744 uint32_t mem_type,
745 struct ttm_placement *placement,
746 struct ttm_mem_reg *mem,
747 bool interruptible, bool no_wait)
 748{
 749 struct ttm_bo_device *bdev = bo->bdev;
 750 struct ttm_bo_global *glob = bdev->glob;
 751 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 752 struct drm_mm_node *node;
753 int ret;
754
 755 do {
756 ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
757 if (unlikely(ret != 0))
758 return ret;
759 if (node)
760 break;
761 spin_lock(&glob->lru_lock);
762 if (list_empty(&man->lru)) {
763 spin_unlock(&glob->lru_lock);
 764 break;
 765 }
 766 spin_unlock(&glob->lru_lock);
767 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
768 no_wait);
769 if (unlikely(ret != 0))
770 return ret;
 771 } while (1);
 772 if (node == NULL)
 773 return -ENOMEM;
774 mem->mm_node = node;
775 mem->mem_type = mem_type;
776 return 0;
777}
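/*
 * In other words, the loop above alternates between asking drm_mm for a node
 * and evicting the least-recently-used buffer of @mem_type; it stops when an
 * allocation succeeds, when the LRU list runs empty, or when an eviction
 * fails, and only an empty LRU with no node obtained ends in -ENOMEM.
 */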
778
779static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
780 uint32_t cur_placement,
781 uint32_t proposed_placement)
782{
783 uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
784 uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
785
786 /**
787 * Keep current caching if possible.
788 */
789
790 if ((cur_placement & caching) != 0)
791 result |= (cur_placement & caching);
792 else if ((man->default_caching & caching) != 0)
793 result |= man->default_caching;
794 else if ((TTM_PL_FLAG_CACHED & caching) != 0)
795 result |= TTM_PL_FLAG_CACHED;
796 else if ((TTM_PL_FLAG_WC & caching) != 0)
797 result |= TTM_PL_FLAG_WC;
798 else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
799 result |= TTM_PL_FLAG_UNCACHED;
800
801 return result;
802}
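/*
 * Example of the priority implemented above: if the buffer is currently
 * write-combined and the proposed placement allows both TTM_PL_FLAG_CACHED
 * and TTM_PL_FLAG_WC, the existing TTM_PL_FLAG_WC is kept.  Only when the
 * current caching mode is not allowed does the code fall back to the
 * manager's default caching and then to cached, write-combined and uncached,
 * in that order.
 */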
803
804static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
805 bool disallow_fixed,
806 uint32_t mem_type,
807 uint32_t proposed_placement,
808 uint32_t *masked_placement)
809{
810 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
811
812 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
813 return false;
814
 815 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
816 return false;
817
 818 if ((proposed_placement & man->available_caching) == 0)
 819 return false;
 820
821 cur_flags |= (proposed_placement & man->available_caching);
822
823 *masked_placement = cur_flags;
824 return true;
825}
826
827/**
828 * Creates space for memory region @mem according to its type.
829 *
830 * This function first searches for free space in compatible memory types in
831 * the priority order defined by the driver. If free space isn't found, then
832 * ttm_bo_mem_force_space is attempted in priority order to evict and find
833 * space.
834 */
835int ttm_bo_mem_space(struct ttm_buffer_object *bo,
836 struct ttm_placement *placement,
837 struct ttm_mem_reg *mem,
838 bool interruptible, bool no_wait)
839{
840 struct ttm_bo_device *bdev = bo->bdev;
841 struct ttm_mem_type_manager *man;
842 uint32_t mem_type = TTM_PL_SYSTEM;
843 uint32_t cur_flags = 0;
844 bool type_found = false;
845 bool type_ok = false;
 846 bool has_erestartsys = false;
 847 struct drm_mm_node *node = NULL;
 848 int i, ret;
849
850 mem->mm_node = NULL;
 851 for (i = 0; i < placement->num_placement; ++i) {
852 ret = ttm_mem_type_from_flags(placement->placement[i],
853 &mem_type);
854 if (ret)
855 return ret;
856 man = &bdev->man[mem_type];
857
858 type_ok = ttm_bo_mt_compatible(man,
859 bo->type == ttm_bo_type_user,
860 mem_type,
861 placement->placement[i],
862 &cur_flags);
863
864 if (!type_ok)
865 continue;
866
867 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
868 cur_flags);
869 /*
 870 * Copy the access and other non-mapping-related flag bits from
 871 * the requested placement flags into the current flags.
872 */
873 ttm_flag_masked(&cur_flags, placement->placement[i],
874 ~TTM_PL_MASK_MEMTYPE);
 875
876 if (mem_type == TTM_PL_SYSTEM)
877 break;
878
879 if (man->has_type && man->use_type) {
880 type_found = true;
881 ret = ttm_bo_man_get_node(bo, man, placement, mem,
882 &node);
883 if (unlikely(ret))
884 return ret;
885 }
886 if (node)
887 break;
888 }
889
890 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
891 mem->mm_node = node;
892 mem->mem_type = mem_type;
893 mem->placement = cur_flags;
894 if (node)
895 node->private = bo;
896 return 0;
897 }
898
899 if (!type_found)
900 return -EINVAL;
901
 902 for (i = 0; i < placement->num_busy_placement; ++i) {
903 ret = ttm_mem_type_from_flags(placement->placement[i],
904 &mem_type);
905 if (ret)
906 return ret;
 907 man = &bdev->man[mem_type];
908 if (!man->has_type)
909 continue;
 910 if (!ttm_bo_mt_compatible(man,
911 bo->type == ttm_bo_type_user,
912 mem_type,
913 placement->placement[i],
914 &cur_flags))
915 continue;
916
917 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
918 cur_flags);
919 /*
 920 * Copy the access and other non-mapping-related flag bits from
 921 * the requested placement flags into the current flags.
922 */
923 ttm_flag_masked(&cur_flags, placement->placement[i],
924 ~TTM_PL_MASK_MEMTYPE);
 925
926 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
927 interruptible, no_wait);
928 if (ret == 0 && mem->mm_node) {
929 mem->placement = cur_flags;
 930 mem->mm_node->private = bo;
931 return 0;
932 }
933 if (ret == -ERESTARTSYS)
934 has_erestartsys = true;
 935 }
 936 ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
937 return ret;
938}
939EXPORT_SYMBOL(ttm_bo_mem_space);
940
941int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
942{
943 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
944 return -EBUSY;
945
946 return wait_event_interruptible(bo->event_queue,
947 atomic_read(&bo->cpu_writers) == 0);
948}
949
950int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
951 struct ttm_placement *placement,
952 bool interruptible, bool no_wait)
 953{
 954 struct ttm_bo_global *glob = bo->glob;
955 int ret = 0;
956 struct ttm_mem_reg mem;
957
958 BUG_ON(!atomic_read(&bo->reserved));
959
960 /*
961 * FIXME: It's possible to pipeline buffer moves.
962 * Have the driver move function wait for idle when necessary,
963 * instead of doing it here.
964 */
965 spin_lock(&bo->lock);
966 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
967 spin_unlock(&bo->lock);
968 if (ret)
969 return ret;
970 mem.num_pages = bo->num_pages;
971 mem.size = mem.num_pages << PAGE_SHIFT;
972 mem.page_alignment = bo->mem.page_alignment;
973 /*
974 * Determine where to move the buffer.
975 */
 976 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
977 if (ret)
978 goto out_unlock;
 979 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
980out_unlock:
981 if (ret && mem.mm_node) {
 982 spin_lock(&glob->lru_lock);
 983 mem.mm_node->private = NULL;
 984 drm_mm_put_block(mem.mm_node);
 985 spin_unlock(&glob->lru_lock);
986 }
987 return ret;
988}
989
 990static int ttm_bo_mem_compat(struct ttm_placement *placement,
991 struct ttm_mem_reg *mem)
992{
993 int i;
994
995 for (i = 0; i < placement->num_placement; i++) {
996 if ((placement->placement[i] & mem->placement &
997 TTM_PL_MASK_CACHING) &&
998 (placement->placement[i] & mem->placement &
999 TTM_PL_MASK_MEM))
1000 return i;
1001 }
1002 return -1;
1003}
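/*
 * ttm_bo_mem_compat() returns the index of the first placement entry whose
 * memory type and caching flags are both already satisfied by @mem, or -1
 * when none is.  ttm_buffer_object_validate() uses that index to refresh the
 * non-mapping flag bits in place instead of moving the buffer.
 */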
1004
1005int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
1006 struct ttm_placement *placement,
1007 bool interruptible, bool no_wait)
1008{
1009 int ret;
1010
1011 BUG_ON(!atomic_read(&bo->reserved));
1012 /* Check that range is valid */
1013 if (placement->lpfn || placement->fpfn)
1014 if (placement->fpfn > placement->lpfn ||
1015 (placement->lpfn - placement->fpfn) < bo->num_pages)
1016 return -EINVAL;
1017 /*
1018 * Check whether we need to move buffer.
1019 */
1020 ret = ttm_bo_mem_compat(placement, &bo->mem);
1021 if (ret < 0) {
1022 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
1023 if (ret)
 1024 return ret;
1025 } else {
1026 /*
 1027 * Copy the access and other non-mapping-related flag bits from
 1028 * the compatible memory placement flags into the active flags.
1029 */
1030 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1031 ~TTM_PL_MASK_MEMTYPE);
 1032 }
1033 /*
1034 * We might need to add a TTM.
1035 */
1036 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1037 ret = ttm_bo_add_ttm(bo, true);
1038 if (ret)
1039 return ret;
1040 }
1041 return 0;
1042}
1043EXPORT_SYMBOL(ttm_buffer_object_validate);
1044
1045int
1046ttm_bo_check_placement(struct ttm_buffer_object *bo,
1047 uint32_t set_flags, uint32_t clr_flags)
1048{
1049 uint32_t new_mask = set_flags | clr_flags;
1050
1051 if ((bo->type == ttm_bo_type_user) &&
1052 (clr_flags & TTM_PL_FLAG_CACHED)) {
1053 printk(KERN_ERR TTM_PFX
1054 "User buffers require cache-coherent memory.\n");
1055 return -EINVAL;
1056 }
1057
1058 if (!capable(CAP_SYS_ADMIN)) {
1059 if (new_mask & TTM_PL_FLAG_NO_EVICT) {
1060 printk(KERN_ERR TTM_PFX "Need to be root to modify"
1061 " NO_EVICT status.\n");
1062 return -EINVAL;
1063 }
1064
1065 if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
1066 (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
1067 printk(KERN_ERR TTM_PFX
1068 "Incompatible memory specification"
1069 " for NO_EVICT buffer.\n");
1070 return -EINVAL;
1071 }
1072 }
1073 return 0;
1074}
1075
1076int ttm_buffer_object_init(struct ttm_bo_device *bdev,
1077 struct ttm_buffer_object *bo,
1078 unsigned long size,
1079 enum ttm_bo_type type,
1080 uint32_t flags,
1081 uint32_t page_alignment,
1082 unsigned long buffer_start,
1083 bool interruptible,
1084 struct file *persistant_swap_storage,
1085 size_t acc_size,
1086 void (*destroy) (struct ttm_buffer_object *))
1087{
 1088 int i, c, ret = 0;
 1089 unsigned long num_pages;
1090 uint32_t placements[8];
1091 struct ttm_placement placement;
1092
1093 size += buffer_start & ~PAGE_MASK;
1094 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1095 if (num_pages == 0) {
1096 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1097 return -EINVAL;
1098 }
1099 bo->destroy = destroy;
1100
1101 spin_lock_init(&bo->lock);
1102 kref_init(&bo->kref);
1103 kref_init(&bo->list_kref);
1104 atomic_set(&bo->cpu_writers, 0);
1105 atomic_set(&bo->reserved, 1);
1106 init_waitqueue_head(&bo->event_queue);
1107 INIT_LIST_HEAD(&bo->lru);
1108 INIT_LIST_HEAD(&bo->ddestroy);
1109 INIT_LIST_HEAD(&bo->swap);
1110 bo->bdev = bdev;
 1111 bo->glob = bdev->glob;
1112 bo->type = type;
1113 bo->num_pages = num_pages;
1114 bo->mem.mem_type = TTM_PL_SYSTEM;
1115 bo->mem.num_pages = bo->num_pages;
1116 bo->mem.mm_node = NULL;
1117 bo->mem.page_alignment = page_alignment;
1118 bo->buffer_start = buffer_start & PAGE_MASK;
1119 bo->priv_flags = 0;
1120 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1121 bo->seq_valid = false;
1122 bo->persistant_swap_storage = persistant_swap_storage;
1123 bo->acc_size = acc_size;
 1124 atomic_inc(&bo->glob->bo_count);
1125
1126 ret = ttm_bo_check_placement(bo, flags, 0ULL);
1127 if (unlikely(ret != 0))
1128 goto out_err;
1129
1130 /*
1131 * If no caching attributes are set, accept any form of caching.
1132 */
1133
1134 if ((flags & TTM_PL_MASK_CACHING) == 0)
1135 flags |= TTM_PL_MASK_CACHING;
1136
1137 /*
1138 * For ttm_bo_type_device buffers, allocate
1139 * address space from the device.
1140 */
1141
1142 if (bo->type == ttm_bo_type_device) {
1143 ret = ttm_bo_setup_vm(bo);
1144 if (ret)
1145 goto out_err;
1146 }
1147
1148 placement.fpfn = 0;
1149 placement.lpfn = 0;
1150 for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
1151 if (flags & (1 << i))
1152 placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
1153 placement.placement = placements;
1154 placement.num_placement = c;
1155 placement.busy_placement = placements;
1156 placement.num_busy_placement = c;
1157 ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
1158 if (ret)
1159 goto out_err;
1160
1161 ttm_bo_unreserve(bo);
1162 return 0;
1163
1164out_err:
1165 ttm_bo_unreserve(bo);
1166 ttm_bo_unref(&bo);
1167
1168 return ret;
1169}
1170EXPORT_SYMBOL(ttm_buffer_object_init);
1171
 1172static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1173 unsigned long num_pages)
1174{
1175 size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1176 PAGE_MASK;
1177
 1178 return glob->ttm_bo_size + 2 * page_array_size;
1179}
1180
1181int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1182 unsigned long size,
1183 enum ttm_bo_type type,
1184 uint32_t flags,
1185 uint32_t page_alignment,
1186 unsigned long buffer_start,
1187 bool interruptible,
1188 struct file *persistant_swap_storage,
1189 struct ttm_buffer_object **p_bo)
1190{
1191 struct ttm_buffer_object *bo;
 1192 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 1193 int ret;
1194
1195 size_t acc_size =
 1196 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
 1197 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1198 if (unlikely(ret != 0))
1199 return ret;
1200
1201 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1202
1203 if (unlikely(bo == NULL)) {
 1204 ttm_mem_global_free(mem_glob, acc_size);
1205 return -ENOMEM;
1206 }
1207
1208 ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
1209 page_alignment, buffer_start,
1210 interruptible,
1211 persistant_swap_storage, acc_size, NULL);
1212 if (likely(ret == 0))
1213 *p_bo = bo;
1214
1215 return ret;
1216}
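/*
 * A minimal driver-side usage sketch (the placement flags and alignment below
 * are illustrative driver policy, not requirements of this API):
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = ttm_buffer_object_create(bdev, size, ttm_bo_type_kernel,
 *				       TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 *				       0, 0, false, NULL, &bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 */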
1217
 1218static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 1219 unsigned mem_type, bool allow_errors)
 1220{
 1221 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 1222 struct ttm_bo_global *glob = bdev->glob;
 1223 int ret;
1224
1225 /*
1226 * Can't use standard list traversal since we're unlocking.
1227 */
1228
 1229 spin_lock(&glob->lru_lock);
 1230 while (!list_empty(&man->lru)) {
 1231 spin_unlock(&glob->lru_lock);
1232 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1233 if (ret) {
1234 if (allow_errors) {
1235 return ret;
1236 } else {
1237 printk(KERN_ERR TTM_PFX
1238 "Cleanup eviction failed\n");
1239 }
1240 }
 1241 spin_lock(&glob->lru_lock);
 1242 }
 1243 spin_unlock(&glob->lru_lock);
1244 return 0;
1245}
1246
1247int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1248{
 1249 struct ttm_bo_global *glob = bdev->glob;
 1250 struct ttm_mem_type_manager *man;
1251 int ret = -EINVAL;
1252
1253 if (mem_type >= TTM_NUM_MEM_TYPES) {
1254 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1255 return ret;
1256 }
 1257 man = &bdev->man[mem_type];
1258
1259 if (!man->has_type) {
1260 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1261 "memory manager type %u\n", mem_type);
1262 return ret;
1263 }
1264
1265 man->use_type = false;
1266 man->has_type = false;
1267
1268 ret = 0;
1269 if (mem_type > 0) {
 1270 ttm_bo_force_list_clean(bdev, mem_type, false);
 1271
 1272 spin_lock(&glob->lru_lock);
1273 if (drm_mm_clean(&man->manager))
1274 drm_mm_takedown(&man->manager);
1275 else
1276 ret = -EBUSY;
1277
 1278 spin_unlock(&glob->lru_lock);
1279 }
1280
1281 return ret;
1282}
1283EXPORT_SYMBOL(ttm_bo_clean_mm);
1284
1285int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1286{
1287 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1288
1289 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1290 printk(KERN_ERR TTM_PFX
1291 "Illegal memory manager memory type %u.\n",
1292 mem_type);
1293 return -EINVAL;
1294 }
1295
1296 if (!man->has_type) {
1297 printk(KERN_ERR TTM_PFX
1298 "Memory type %u has not been initialized.\n",
1299 mem_type);
1300 return 0;
1301 }
1302
 1303 return ttm_bo_force_list_clean(bdev, mem_type, true);
1304}
1305EXPORT_SYMBOL(ttm_bo_evict_mm);
1306
1307int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 1308 unsigned long p_size)
1309{
1310 int ret = -EINVAL;
1311 struct ttm_mem_type_manager *man;
1312
1313 if (type >= TTM_NUM_MEM_TYPES) {
1314 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1315 return ret;
1316 }
1317
1318 man = &bdev->man[type];
1319 if (man->has_type) {
1320 printk(KERN_ERR TTM_PFX
1321 "Memory manager already initialized for type %d\n",
1322 type);
1323 return ret;
1324 }
1325
1326 ret = bdev->driver->init_mem_type(bdev, type, man);
1327 if (ret)
1328 return ret;
1329
1330 ret = 0;
1331 if (type != TTM_PL_SYSTEM) {
1332 if (!p_size) {
1333 printk(KERN_ERR TTM_PFX
1334 "Zero size memory manager type %d\n",
1335 type);
1336 return ret;
1337 }
 1338 ret = drm_mm_init(&man->manager, 0, p_size);
1339 if (ret)
1340 return ret;
1341 }
1342 man->has_type = true;
1343 man->use_type = true;
1344 man->size = p_size;
1345
1346 INIT_LIST_HEAD(&man->lru);
1347
1348 return 0;
1349}
1350EXPORT_SYMBOL(ttm_bo_init_mm);
1351
1352static void ttm_bo_global_kobj_release(struct kobject *kobj)
1353{
1354 struct ttm_bo_global *glob =
1355 container_of(kobj, struct ttm_bo_global, kobj);
1356
1357 ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1358 __free_page(glob->dummy_read_page);
1359 kfree(glob);
1360}
1361
1362void ttm_bo_global_release(struct ttm_global_reference *ref)
1363{
1364 struct ttm_bo_global *glob = ref->object;
1365
1366 kobject_del(&glob->kobj);
1367 kobject_put(&glob->kobj);
1368}
1369EXPORT_SYMBOL(ttm_bo_global_release);
1370
1371int ttm_bo_global_init(struct ttm_global_reference *ref)
1372{
1373 struct ttm_bo_global_ref *bo_ref =
1374 container_of(ref, struct ttm_bo_global_ref, ref);
1375 struct ttm_bo_global *glob = ref->object;
1376 int ret;
1377
1378 mutex_init(&glob->device_list_mutex);
1379 spin_lock_init(&glob->lru_lock);
1380 glob->mem_glob = bo_ref->mem_glob;
1381 glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1382
1383 if (unlikely(glob->dummy_read_page == NULL)) {
1384 ret = -ENOMEM;
1385 goto out_no_drp;
1386 }
1387
1388 INIT_LIST_HEAD(&glob->swap_lru);
1389 INIT_LIST_HEAD(&glob->device_list);
1390
1391 ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1392 ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1393 if (unlikely(ret != 0)) {
1394 printk(KERN_ERR TTM_PFX
1395 "Could not register buffer object swapout.\n");
1396 goto out_no_shrink;
1397 }
1398
1399 glob->ttm_bo_extra_size =
1400 ttm_round_pot(sizeof(struct ttm_tt)) +
1401 ttm_round_pot(sizeof(struct ttm_backend));
1402
1403 glob->ttm_bo_size = glob->ttm_bo_extra_size +
1404 ttm_round_pot(sizeof(struct ttm_buffer_object));
1405
1406 atomic_set(&glob->bo_count, 0);
1407
1408 kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
1409 ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
1410 if (unlikely(ret != 0))
1411 kobject_put(&glob->kobj);
1412 return ret;
1413out_no_shrink:
1414 __free_page(glob->dummy_read_page);
1415out_no_drp:
1416 kfree(glob);
1417 return ret;
1418}
1419EXPORT_SYMBOL(ttm_bo_global_init);
1420
1421
1422int ttm_bo_device_release(struct ttm_bo_device *bdev)
1423{
1424 int ret = 0;
1425 unsigned i = TTM_NUM_MEM_TYPES;
1426 struct ttm_mem_type_manager *man;
 1427 struct ttm_bo_global *glob = bdev->glob;
1428
1429 while (i--) {
1430 man = &bdev->man[i];
1431 if (man->has_type) {
1432 man->use_type = false;
1433 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1434 ret = -EBUSY;
1435 printk(KERN_ERR TTM_PFX
1436 "DRM memory manager type %d "
1437 "is not clean.\n", i);
1438 }
1439 man->has_type = false;
1440 }
1441 }
1442
1443 mutex_lock(&glob->device_list_mutex);
1444 list_del(&bdev->device_list);
1445 mutex_unlock(&glob->device_list_mutex);
1446
1447 if (!cancel_delayed_work(&bdev->wq))
1448 flush_scheduled_work();
1449
1450 while (ttm_bo_delayed_delete(bdev, true))
1451 ;
1452
 1453 spin_lock(&glob->lru_lock);
1454 if (list_empty(&bdev->ddestroy))
1455 TTM_DEBUG("Delayed destroy list was clean\n");
1456
1457 if (list_empty(&bdev->man[0].lru))
1458 TTM_DEBUG("Swap list was clean\n");
 1459 spin_unlock(&glob->lru_lock);
 1460
1461 BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1462 write_lock(&bdev->vm_lock);
1463 drm_mm_takedown(&bdev->addr_space_mm);
1464 write_unlock(&bdev->vm_lock);
1465
1466 return ret;
1467}
1468EXPORT_SYMBOL(ttm_bo_device_release);
1469
 1470int ttm_bo_device_init(struct ttm_bo_device *bdev,
1471 struct ttm_bo_global *glob,
1472 struct ttm_bo_driver *driver,
 1473 uint64_t file_page_offset,
 1474 bool need_dma32)
1475{
1476 int ret = -EINVAL;
1477
 1478 rwlock_init(&bdev->vm_lock);
 1479 bdev->driver = driver;
1480
1481 memset(bdev->man, 0, sizeof(bdev->man));
1482
1483 /*
1484 * Initialize the system memory buffer type.
1485 * Other types need to be driver / IOCTL initialized.
1486 */
 1487 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
 1488 if (unlikely(ret != 0))
 1489 goto out_no_sys;
1490
1491 bdev->addr_space_rb = RB_ROOT;
1492 ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1493 if (unlikely(ret != 0))
 1494 goto out_no_addr_mm;
1495
1496 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1497 bdev->nice_mode = true;
1498 INIT_LIST_HEAD(&bdev->ddestroy);
 1499 bdev->dev_mapping = NULL;
 1500 bdev->glob = glob;
 1501 bdev->need_dma32 = need_dma32;
 1502
1503 mutex_lock(&glob->device_list_mutex);
1504 list_add_tail(&bdev->device_list, &glob->device_list);
1505 mutex_unlock(&glob->device_list_mutex);
1506
1507 return 0;
 1508out_no_addr_mm:
 1509 ttm_bo_clean_mm(bdev, 0);
 1510out_no_sys:
1511 return ret;
1512}
1513EXPORT_SYMBOL(ttm_bo_device_init);
1514
1515/*
1516 * buffer object vm functions.
1517 */
1518
1519bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1520{
1521 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1522
1523 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1524 if (mem->mem_type == TTM_PL_SYSTEM)
1525 return false;
1526
1527 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1528 return false;
1529
1530 if (mem->placement & TTM_PL_FLAG_CACHED)
1531 return false;
1532 }
1533 return true;
1534}
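/*
 * Put differently: a region is PCI-addressable when its memory type is
 * FIXED, or when it is a non-system, non-CMA, uncached or write-combined
 * TTM region; system memory, CMA regions and cached placements are reached
 * through the CPU page tables instead.
 */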
1535
1536int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
1537 struct ttm_mem_reg *mem,
1538 unsigned long *bus_base,
1539 unsigned long *bus_offset, unsigned long *bus_size)
1540{
1541 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1542
1543 *bus_size = 0;
1544 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1545 return -EINVAL;
1546
1547 if (ttm_mem_reg_is_pci(bdev, mem)) {
1548 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1549 *bus_size = mem->num_pages << PAGE_SHIFT;
1550 *bus_base = man->io_offset;
1551 }
1552
1553 return 0;
1554}
1555
1556void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1557{
1558 struct ttm_bo_device *bdev = bo->bdev;
1559 loff_t offset = (loff_t) bo->addr_space_offset;
1560 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1561
1562 if (!bdev->dev_mapping)
1563 return;
1564
1565 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1566}
 1567EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1568
1569static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1570{
1571 struct ttm_bo_device *bdev = bo->bdev;
1572 struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1573 struct rb_node *parent = NULL;
1574 struct ttm_buffer_object *cur_bo;
1575 unsigned long offset = bo->vm_node->start;
1576 unsigned long cur_offset;
1577
1578 while (*cur) {
1579 parent = *cur;
1580 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1581 cur_offset = cur_bo->vm_node->start;
1582 if (offset < cur_offset)
1583 cur = &parent->rb_left;
1584 else if (offset > cur_offset)
1585 cur = &parent->rb_right;
1586 else
1587 BUG();
1588 }
1589
1590 rb_link_node(&bo->vm_rb, parent, cur);
1591 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1592}
1593
1594/**
1595 * ttm_bo_setup_vm:
1596 *
1597 * @bo: the buffer to allocate address space for
1598 *
1599 * Allocate address space in the drm device so that applications
1600 * can mmap the buffer and access the contents. This only
1601 * applies to ttm_bo_type_device objects as others are not
1602 * placed in the drm device address space.
1603 */
1604
1605static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1606{
1607 struct ttm_bo_device *bdev = bo->bdev;
1608 int ret;
1609
1610retry_pre_get:
1611 ret = drm_mm_pre_get(&bdev->addr_space_mm);
1612 if (unlikely(ret != 0))
1613 return ret;
1614
1615 write_lock(&bdev->vm_lock);
1616 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1617 bo->mem.num_pages, 0, 0);
1618
1619 if (unlikely(bo->vm_node == NULL)) {
1620 ret = -ENOMEM;
1621 goto out_unlock;
1622 }
1623
1624 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1625 bo->mem.num_pages, 0);
1626
1627 if (unlikely(bo->vm_node == NULL)) {
1628 write_unlock(&bdev->vm_lock);
1629 goto retry_pre_get;
1630 }
1631
1632 ttm_bo_vm_insert_rb(bo);
1633 write_unlock(&bdev->vm_lock);
1634 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1635
1636 return 0;
1637out_unlock:
1638 write_unlock(&bdev->vm_lock);
1639 return ret;
1640}
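/*
 * The retry_pre_get loop above exists because drm_mm_get_block_atomic() can
 * fail when no pre-allocated drm_mm_node is available; in that case
 * drm_mm_pre_get() is called again outside vm_lock to refill the cache, and
 * the search is repeated under the lock until the atomic allocation succeeds
 * or no suitable hole is left (-ENOMEM).
 */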
1641
1642int ttm_bo_wait(struct ttm_buffer_object *bo,
1643 bool lazy, bool interruptible, bool no_wait)
1644{
1645 struct ttm_bo_driver *driver = bo->bdev->driver;
1646 void *sync_obj;
1647 void *sync_obj_arg;
1648 int ret = 0;
1649
1650 if (likely(bo->sync_obj == NULL))
1651 return 0;
1652
1653 while (bo->sync_obj) {
1654
1655 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1656 void *tmp_obj = bo->sync_obj;
1657 bo->sync_obj = NULL;
1658 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1659 spin_unlock(&bo->lock);
1660 driver->sync_obj_unref(&tmp_obj);
1661 spin_lock(&bo->lock);
1662 continue;
1663 }
1664
1665 if (no_wait)
1666 return -EBUSY;
1667
1668 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1669 sync_obj_arg = bo->sync_obj_arg;
1670 spin_unlock(&bo->lock);
1671 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1672 lazy, interruptible);
1673 if (unlikely(ret != 0)) {
1674 driver->sync_obj_unref(&sync_obj);
1675 spin_lock(&bo->lock);
1676 return ret;
1677 }
1678 spin_lock(&bo->lock);
1679 if (likely(bo->sync_obj == sync_obj &&
1680 bo->sync_obj_arg == sync_obj_arg)) {
1681 void *tmp_obj = bo->sync_obj;
1682 bo->sync_obj = NULL;
1683 clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1684 &bo->priv_flags);
1685 spin_unlock(&bo->lock);
1686 driver->sync_obj_unref(&sync_obj);
1687 driver->sync_obj_unref(&tmp_obj);
1688 spin_lock(&bo->lock);
1689 } else {
1690 spin_unlock(&bo->lock);
1691 driver->sync_obj_unref(&sync_obj);
1692 spin_lock(&bo->lock);
1693 }
1694 }
1695 return 0;
1696}
1697EXPORT_SYMBOL(ttm_bo_wait);
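/*
 * Note that ttm_bo_wait() drops bo->lock around every sync_obj_unref() and
 * sync_obj_wait() call, so bo->sync_obj is re-checked each time the lock is
 * retaken and the fence is only dereferenced through the local reference
 * taken while the lock was held.
 */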
1698
1699void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
1700{
1701 atomic_set(&bo->reserved, 0);
1702 wake_up_all(&bo->event_queue);
1703}
1704
1705int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1706 bool no_wait)
1707{
1708 int ret;
1709
1710 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
1711 if (no_wait)
1712 return -EBUSY;
1713 else if (interruptible) {
1714 ret = wait_event_interruptible
1715 (bo->event_queue, atomic_read(&bo->reserved) == 0);
1716 if (unlikely(ret != 0))
 1717 return ret;
1718 } else {
1719 wait_event(bo->event_queue,
1720 atomic_read(&bo->reserved) == 0);
1721 }
1722 }
1723 return 0;
1724}
1725
1726int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1727{
1728 int ret = 0;
1729
1730 /*
1731 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
1732 * makes sure the lru lists are updated.
1733 */
1734
1735 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1736 if (unlikely(ret != 0))
1737 return ret;
1738 spin_lock(&bo->lock);
1739 ret = ttm_bo_wait(bo, false, true, no_wait);
1740 spin_unlock(&bo->lock);
1741 if (likely(ret == 0))
1742 atomic_inc(&bo->cpu_writers);
1743 ttm_bo_unreserve(bo);
1744 return ret;
1745}
1746
1747void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1748{
1749 if (atomic_dec_and_test(&bo->cpu_writers))
1750 wake_up_all(&bo->event_queue);
1751}
1752
1753/**
1754 * A buffer object shrink method that tries to swap out the first
1755 * buffer object on the bo_global::swap_lru list.
1756 */
1757
1758static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1759{
1760 struct ttm_bo_global *glob =
1761 container_of(shrink, struct ttm_bo_global, shrink);
1762 struct ttm_buffer_object *bo;
1763 int ret = -EBUSY;
1764 int put_count;
1765 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1766
 1767 spin_lock(&glob->lru_lock);
 1768 while (ret == -EBUSY) {
1769 if (unlikely(list_empty(&glob->swap_lru))) {
1770 spin_unlock(&glob->lru_lock);
1771 return -EBUSY;
1772 }
1773
 1774 bo = list_first_entry(&glob->swap_lru,
1775 struct ttm_buffer_object, swap);
1776 kref_get(&bo->list_kref);
1777
1778 /**
1779 * Reserve buffer. Since we unlock while sleeping, we need
1780 * to re-check that nobody removed us from the swap-list while
1781 * we slept.
1782 */
1783
1784 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1785 if (unlikely(ret == -EBUSY)) {
 1786 spin_unlock(&glob->lru_lock);
1787 ttm_bo_wait_unreserved(bo, false);
1788 kref_put(&bo->list_kref, ttm_bo_release_list);
 1789 spin_lock(&glob->lru_lock);
1790 }
1791 }
1792
1793 BUG_ON(ret != 0);
1794 put_count = ttm_bo_del_from_lru(bo);
 1795 spin_unlock(&glob->lru_lock);
1796
1797 while (put_count--)
1798 kref_put(&bo->list_kref, ttm_bo_ref_bug);
1799
1800 /**
1801 * Wait for GPU, then move to system cached.
1802 */
1803
1804 spin_lock(&bo->lock);
1805 ret = ttm_bo_wait(bo, false, false, false);
1806 spin_unlock(&bo->lock);
1807
1808 if (unlikely(ret != 0))
1809 goto out;
1810
1811 if ((bo->mem.placement & swap_placement) != swap_placement) {
1812 struct ttm_mem_reg evict_mem;
1813
1814 evict_mem = bo->mem;
1815 evict_mem.mm_node = NULL;
1816 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1817 evict_mem.mem_type = TTM_PL_SYSTEM;
1818
1819 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1820 false, false);
1821 if (unlikely(ret != 0))
1822 goto out;
1823 }
1824
1825 ttm_bo_unmap_virtual(bo);
1826
1827 /**
1828 * Swap out. Buffer will be swapped in again as soon as
1829 * anyone tries to access a ttm page.
1830 */
1831
1832 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1833out:
1834
1835 /**
1836 *
1837 * Unreserve without putting on LRU to avoid swapping out an
1838 * already swapped buffer.
1839 */
1840
1841 atomic_set(&bo->reserved, 0);
1842 wake_up_all(&bo->event_queue);
1843 kref_put(&bo->list_kref, ttm_bo_release_list);
1844 return ret;
1845}
1846
1847void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1848{
 1849 while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1850 ;
1851}