/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16
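
/*
 * Look up the buffer object backing a range of the device address space.
 * The rb tree is ordered by bo->vm_node->start: descend to the rightmost
 * object that starts at or below @page_start, then check that the whole
 * [page_start, page_start + num_pages) range fits inside it. Both callers
 * hold bdev->vm_lock for read around this lookup.
 */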
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}
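
/*
 * The fault handler proceeds in stages: trylock the bo reservation, let
 * the driver react to the fault (e.g. move the bo somewhere mappable),
 * wait out a pipelined move, reserve the bo's io space, then translate
 * the faulting address into a page index within the bo and prefault up
 * to TTM_BO_VM_NUM_PREFAULT PTEs with vm_insert_mixed().
 */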
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
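			/* fall through: -EBUSY also retries the fault */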
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bdev->fence_lock);
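
	/*
	 * Lock the io space for this memory type and make sure the bo's
	 * io resources (bo->mem.bus) are set up before any pfns are
	 * computed from them.
	 */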
	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}
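
	/*
	 * vma->vm_pgoff is the offset the user mmapped, in pages within
	 * the device address space; rebasing it against bo->vm_node->start
	 * turns the faulting address into a page index relative to the
	 * start of the buffer object.
	 */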
	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >>
			       PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE, we prefaulted into an
		 * already populated PTE, or prefaulting hit an error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
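
/*
 * Usage sketch (not part of this file; the foo_* names are hypothetical):
 * a driver typically forwards its file_operations .mmap hook here,
 * supplying its own ttm_bo_device:
 *
 *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct foo_device *fdev = filp->private_data;
 *
 *		return ttm_bo_mmap(filp, vma, &fdev->bdev);
 *	}
 *
 * The driver's verify_access() hook, called above, is what decides
 * whether filp may map the looked-up bo at all.
 */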

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
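	/*
	 * fbdev-style mappings start at the bo itself rather than at an
	 * offset in the device address space, so only a zero page offset
	 * is valid here. The reference taken below is handed over to
	 * vma->vm_private_data, as in ttm_bo_mmap().
	 */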
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}
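
	/*
	 * Clamp the transfer to what remains of the object past *f_pos:
	 * io_size is the resulting byte count, kmap_num the number of
	 * pages that have to be kmapped to cover the request.
	 */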
	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
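
/*
 * Like ttm_bo_io(), but for a caller-supplied buffer object rather than
 * one looked up by its offset in the device address space, and without
 * the verify_access() check.
 */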
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}