/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}
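
/*
 * As above, but for a DMA-aware ttm: allocate both the array of page
 * pointers and the matching array of DMA addresses. Either allocation
 * may fail; the caller is expected to check both pointers.
 */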
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
	ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
					    sizeof(*ttm->dma_address));
}
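
/*
 * Set the caching attribute of a single page in the kernel linear map.
 * A page that is not in the default (cached) state is first switched
 * back to writeback so its current memtype is released before the new
 * one is applied. Highmem pages have no linear mapping and are left
 * untouched. On non-x86 this is a no-op.
 */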
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}
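
/*
 * Translate TTM placement flags (TTM_PL_FLAG_WC, TTM_PL_FLAG_UNCACHED,
 * cached otherwise) into a caching state and apply it to the ttm.
 */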
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
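
/*
 * Tear down a ttm: unbind it if it is still bound, have the driver
 * unpopulate its pages, drop any non-persistent swap storage and
 * finally call the backend's destroy hook.
 */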
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (likely(ttm->pages != NULL)) {
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}
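
/*
 * Initialize a ttm for a buffer of 'size' bytes and allocate its page
 * directory. Returns -ENOMEM if the page directory cannot be allocated.
 */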
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
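
/*
 * Counterpart of ttm_tt_init(): free the page directory.
 */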
void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
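
/*
 * Same as ttm_tt_init(), but for a DMA-aware ttm: additionally
 * initializes the pages list and allocates the DMA address array.
 */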
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags,
		    struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages || !ttm_dma->dma_address) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);
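
/*
 * Counterpart of ttm_dma_tt_init(): free both the page directory and
 * the DMA address array.
 */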
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm_dma->dma_address);
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
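
/*
 * Unbind a bound ttm through the backend's unbind hook and mark it
 * unbound. A no-op if the ttm is not currently bound.
 */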
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}
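
/*
 * Populate the ttm through the driver, then bind it to the given memory
 * region through the backend's bind hook and mark it bound.
 */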
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
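
/*
 * Copy the ttm's pages back in from its shmem swap storage. On success
 * the swap storage is released (unless it is persistent) and the
 * TTM_PAGE_FLAG_SWAPPED flag is cleared.
 */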
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}
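
/*
 * Copy the ttm's populated pages out to swap storage (a newly created
 * shmem file unless a persistent one is supplied), unpopulate the ttm
 * and mark it swapped out.
 */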
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}