/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
	ttm->dma_address = drm_calloc_large(ttm->num_pages,
					    sizeof(*ttm->dma_address));
}
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm->dma_address);
	ttm->dma_address = NULL;
}
/*
 * Return the struct page backing @index, allocating and accounting it on
 * first use.
 */
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	if (NULL == (p = ttm->pages[index])) {

		ret = ttm_get_pages(&p, ttm->page_flags, ttm->caching_state, 1,
				    &ttm->dma_address[index]);
		if (ret != 0)
			return NULL;

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		ttm->pages[index] = p;
	}
	return p;
out_err:
	ttm_put_pages(&p, 1, ttm->page_flags,
		      ttm->caching_state, &ttm->dma_address[index]);
	return NULL;
}
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);
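/*
 * Illustrative sketch, not part of this file: drivers normally do not call
 * ttm_tt_populate() directly. ttm_tt_bind() below populates the ttm on demand
 * before handing it to the backend, roughly (assumed caller, names
 * hypothetical):
 *
 *	ret = ttm_tt_bind(bo->ttm, &bo->mem);
 *	if (unlikely(ret != 0))
 *		return ret;
 */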
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */
/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back pages already converted to the previous caching state. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
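/*
 * Illustrative sketch, assumed caller outside this file: when a buffer object
 * is moved to a new placement, the move path is expected to apply the matching
 * caching mode before (re)binding, along the lines of:
 *
 *	ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
 *	if (ret)
 *		return ret;
 */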
/*
 * Release all pages backing the ttm and return their memory accounting.
 */
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1, ttm->page_flags,
				      ttm->caching_state, &ttm->dma_address[i]);
		}
	}
	ttm->state = tt_unpopulated;
}
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (likely(ttm->pages != NULL)) {
		ttm_tt_free_alloced_pages(ttm);
		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages || !ttm->dma_address) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
/*
 * Copy the ttm's backing pages back in from its shmem swap storage.
 */
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}
/*
 * Copy the ttm's backing pages out to shmem-backed swap storage and release
 * them.
 */
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;

out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}
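/*
 * Note (sketch, not established by this file alone): ttm_tt_swapout() is
 * expected to be driven by TTM's buffer-object layer under memory pressure,
 * while ttm_tt_swapin() above runs lazily the next time a swapped ttm is
 * populated or bound, via the TTM_PAGE_FLAG_SWAPPED checks in
 * ttm_tt_get_page() and ttm_tt_populate().
 */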