/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);
#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);

	kunmap_atomic(page_virtual, KM_USER0);
}
static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; ++i)
		ttm_tt_clflush_page(*pages++);
	mb();
}
#else
static void ttm_tt_ipi_handler(void *null)
{
	;
}
#endif
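/*
 * Flush CPU caches for an array of pages. On x86 with clflush support the
 * pages are flushed directly from this CPU; on other configurations an IPI
 * is broadcast to all CPUs, and an error is logged if the broadcast fails.
 */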
void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		ttm_tt_cache_flush_clflush(pages, num_pages);
		return;
	}
#else
	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR TTM_PFX
		       "Timed out waiting for drm cache flush.\n");
#endif
}
/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}
static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		return alloc_page(GFP_HIGHUSER | __GFP_ZERO);

	return alloc_page(GFP_HIGHUSER);
}
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_bo_device *bdev = ttm->bdev;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		if (PageHighMem(p)) {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, true);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[--ttm->first_himem_page] = p;
		} else {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, false);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[++ttm->last_lomem_page] = p;
		}
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}
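/*
 * Return the page backing the ttm at @index, swapping the whole ttm back
 * in first if its contents currently live in swap storage.
 */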
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}
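/*
 * Allocate backing pages for the entire ttm and hand them to the backend.
 * On success the ttm moves to the tt_unbound state.
 */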
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	return 0;
}
#endif /* CONFIG_X86 */
/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}
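/*
 * Translate TTM_PL_FLAG_* placement caching flags into a ttm_caching_state
 * and apply it: write-combined and uncached placements take priority over
 * the cached default.
 */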
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
					    PageHighMem(cur_page));
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
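/*
 * Tear down a ttm: destroy the backend, release user or driver-allocated
 * pages along with the page directory, and drop any non-persistant swap
 * storage reference.
 */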
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}
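/*
 * Pin @num_pages of user memory starting at @start in @tsk's address space
 * and use them to back the ttm. The pages are accounted against the global
 * memory limit before get_user_pages() is called.
 */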
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}
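/*
 * Allocate and initialize a ttm for a buffer of @size bytes. The ttm starts
 * out cached and unpopulated; backing pages are allocated lazily on first
 * use.
 */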
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->bdev = bdev;

	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}
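/*
 * Typical driver-side usage (a sketch only; "bdev", "num_pages", "bo_mem"
 * and "dummy_read_page" stand for driver-provided objects, not names
 * defined in this file):
 *
 *	struct ttm_tt *ttm;
 *
 *	ttm = ttm_tt_create(bdev, num_pages << PAGE_SHIFT,
 *			    TTM_PAGE_FLAG_ZERO_ALLOC, dummy_read_page);
 *	if (unlikely(ttm == NULL))
 *		return -ENOMEM;
 *	if (ttm_tt_bind(ttm, bo_mem) != 0)
 *		ttm_tt_destroy(ttm);
 */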
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}
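/*
 * Populate the ttm if necessary and bind it to the backend at the location
 * described by @bo_mem. User-backed ttms are marked dirty, since the device
 * may write to them once bound.
 */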
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
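/*
 * Bring a swapped-out ttm back in: user-backed ttms are simply re-pinned
 * with ttm_tt_set_user(), while driver-allocated ttms are copied back page
 * by page from the shmem swap storage.
 */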
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page))
			goto out_err;
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return -ENOMEM;
}
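/*
 * Copy the ttm's pages out to shmem-backed swap storage (or to
 * @persistant_swap_storage if one is given) and free the backing pages.
 * The ttm must be unbound and cached before it can be swapped out.
 */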
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return -ENOMEM;
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return -ENOMEM;
}