drivers/gpu/drm/ttm/ttm_tt.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
}

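/**
 * Frees the page directory allocated by ttm_tt_alloc_page_directory().
 */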
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}

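/**
 * Releases user pages previously pinned with get_user_pages(), marking
 * them dirty if they may have been written while bound, and returns the
 * ttm to the unpopulated state.
 */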
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

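/**
 * Returns the page at @index, allocating and accounting it first if the
 * slot is still empty. Highmem and lowmem pages are placed at opposite
 * ends of the page directory.
 */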
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct list_head h;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {

		INIT_LIST_HEAD(&h);

		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);

		if (ret != 0)
			return NULL;

		p = list_first_entry(&h, struct page, lru);

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

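/**
 * Like __ttm_tt_get_page(), but first swaps the ttm back in if its
 * contents currently live in swap storage.
 */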
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

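/**
 * Swaps the ttm back in if needed, allocates all of its backing pages
 * and hands them to the backend, leaving the ttm in the tt_unbound state.
 */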
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

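/*
 * Per-page caching transition for the kernel linear map. Only implemented
 * on x86; a no-op on other architectures.
 */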
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

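/**
 * Translates TTM placement caching flags into a ttm_caching_state and
 * applies it to the ttm.
 */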
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

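/**
 * Returns all driver-allocated pages to the page pool and resets the
 * ttm to the unpopulated state.
 */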
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	unsigned count = 0;
	struct list_head h;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	INIT_LIST_HEAD(&h);

	if (be)
		be->func->clear(be);
	for (i = 0; i < ttm->num_pages; ++i) {

		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			list_add(&cur_page->lru, &h);
			count++;
		}
	}
	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

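/**
 * Destroys the backend, releases all backing pages and swap storage,
 * and frees the ttm itself.
 */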
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

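/**
 * Populates the ttm with user pages obtained via get_user_pages() from
 * the range starting at @start in @tsk's address space.
 */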
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

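/**
 * Allocates and initializes a struct ttm_tt large enough to back @size
 * bytes, including its page directory and driver backend.
 */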
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

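/**
 * Unbinds the ttm from the backend if it is currently bound.
 */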
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

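/**
 * Populates the ttm if necessary and binds it to the memory region
 * described by @bo_mem.
 */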
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

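/**
 * Copies the ttm's contents back from swap storage (or re-pins user
 * pages) and clears the swapped flag.
 */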
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}

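/**
 * Copies the ttm's pages out to shmem-backed swap storage (or simply
 * unpins user pages) and releases the backing pages.
 */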
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return ret;
}