/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_cache.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

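/**
 * Allocates storage for the page, CPU-address and DMA-address arrays of a
 * DMA-aware ttm in a single contiguous allocation, then carves out the
 * three sub-arrays.
 */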
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
					  sizeof(*ttm->ttm.pages) +
					  sizeof(*ttm->dma_address) +
					  sizeof(*ttm->cpu_address));
	ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
}

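/*
 * Change the caching attribute of a single page in the kernel's linear
 * map. A page that is not in the default (write-back) state must be
 * returned to write-back first so its current memtype is released before
 * a new one (write-combined or uncached) is set. Highmem pages have no
 * fixed kernel mapping and are left alone. On non-x86 architectures this
 * is a no-op.
 */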
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

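/**
 * Translates the caching part of a placement flag mask (TTM_PL_FLAG_WC,
 * TTM_PL_FLAG_UNCACHED, default cached) into a caching state and applies
 * it to the ttm.
 */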
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

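/**
 * Unbinds and unpopulates a ttm, drops any non-persistent swap storage
 * still attached to it, and finally destroys it through the backend
 * function table.
 */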
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

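/**
 * Initializes a ttm for a buffer of @size bytes and allocates its page
 * directory. Returns -ENOMEM if the page directory cannot be allocated.
 */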
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

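/**
 * Releases the page directory allocated by ttm_tt_init().
 */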
void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

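/**
 * DMA-aware counterpart of ttm_tt_init(): additionally initializes the
 * pages list and the combined page/address directories.
 */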
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags,
		    struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

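/**
 * Releases the directories allocated by ttm_dma_tt_init(). A single
 * drm_free_large() suffices because the page, CPU-address and
 * DMA-address arrays share one allocation.
 */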
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	ttm_dma->cpu_address = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

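/**
 * Unbinds the ttm from the GPU if it is currently bound. A failing
 * backend unbind is treated as fatal, hence the BUG_ON().
 */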
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

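/**
 * Populates the ttm if necessary and binds it to @bo_mem through the
 * backend function table.
 */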
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

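/**
 * Copies the ttm's pages back in from shmem swap storage, then drops
 * the storage unless it is persistent.
 */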
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			/* Drop the shmem page reference before bailing
			 * out so it isn't leaked. */
			put_page(from_page);
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

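/**
 * Copies the ttm's pages out to shmem swap storage, allocating a
 * transient shmem file if no persistent storage was supplied, and
 * unpopulates the ttm. Only cached, unbound ttms may be swapped out.
 */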
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}

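/*
 * Clears the mapping and index fields of all pages before they are
 * returned to the page allocator. Pages of an SG ttm are managed
 * elsewhere and left untouched.
 */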
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

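/**
 * Releases the ttm's backing pages through the driver's unpopulate hook,
 * clearing their mapping fields first.
 */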
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}