/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

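/*
 * One i915_mm_struct is kept per process mm_struct that has created a
 * userptr object. It holds the (deferred-free) reference on the mm, the
 * per-mm i915_mmu_notifier shared by all of that process's userptr
 * objects, and the worker used to drop the mm reference outside of
 * struct_mutex (see i915_gem_userptr_init__mm_struct() below).
 */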
struct i915_mm_struct {
        struct mm_struct *mm;
        struct drm_device *dev;
        struct i915_mmu_notifier *mn;
        struct hlist_node node;
        struct kref kref;
        struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
        spinlock_t lock;
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
        struct list_head linear;
        unsigned long serial;
        bool has_linear;
};

struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
        struct interval_tree_node it;
        struct list_head link;
        struct drm_i915_gem_object *obj;
        bool is_linear;
};

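/*
 * Revoke the GPU's access to the pages backing @obj: unbind every VMA,
 * release the object's pages and drop the reference taken by the caller.
 * Returns the end of the object's user range so that an invalidate walk
 * can continue from there.
 */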
static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        unsigned long end;

        mutex_lock(&dev->struct_mutex);
        /* Cancel any active worker and force us to re-evaluate gup */
        obj->userptr.work = NULL;

        if (obj->pages != NULL) {
                struct drm_i915_private *dev_priv = to_i915(dev);
                struct i915_vma *vma, *tmp;
                bool was_interruptible;

                was_interruptible = dev_priv->mm.interruptible;
                dev_priv->mm.interruptible = false;

                list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
                        int ret = i915_vma_unbind(vma);
                        WARN_ON(ret && ret != -EIO);
                }
                WARN_ON(i915_gem_object_put_pages(obj));

                dev_priv->mm.interruptible = was_interruptible;
        }

        end = obj->userptr.ptr + obj->base.size;

        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);

        return end;
}

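/*
 * Invalidation walk used once any ranges overlap (mn->has_linear): scan the
 * flat list of objects instead of the interval tree. The spinlock is dropped
 * around cancel_userptr() (which needs struct_mutex), so mn->serial is
 * sampled first and the walk restarts if the list changed in the meantime.
 */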
static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
                                      struct mm_struct *mm,
                                      unsigned long start,
                                      unsigned long end)
{
        struct i915_mmu_object *mo;
        unsigned long serial;

restart:
        serial = mn->serial;
        list_for_each_entry(mo, &mn->linear, link) {
                struct drm_i915_gem_object *obj;

                if (mo->it.last < start || mo->it.start > end)
                        continue;

                obj = mo->obj;
                drm_gem_object_reference(&obj->base);
                spin_unlock(&mn->lock);

                cancel_userptr(obj);

                spin_lock(&mn->lock);
                if (serial != mn->serial)
                        goto restart;
        }

        return NULL;
}

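/*
 * mmu_notifier callback: the CPU page tables for [start, end) are about to
 * change, so every userptr object overlapping that range must give up its
 * pinned pages. Objects are looked up under the spinlock, referenced, and
 * then cancelled with the lock dropped; mn->serial detects concurrent
 * additions/removals and forces a fresh interval-tree lookup.
 */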
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
{
        struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
        struct interval_tree_node *it = NULL;
        unsigned long next = start;
        unsigned long serial = 0;

        end--; /* interval ranges are inclusive, but invalidate range is exclusive */
        while (next < end) {
                struct drm_i915_gem_object *obj = NULL;

                spin_lock(&mn->lock);
                if (mn->has_linear)
                        it = invalidate_range__linear(mn, mm, start, end);
                else if (serial == mn->serial)
                        it = interval_tree_iter_next(it, next, end);
                else
                        it = interval_tree_iter_first(&mn->objects, start, end);
                if (it != NULL) {
                        obj = container_of(it, struct i915_mmu_object, it)->obj;
                        drm_gem_object_reference(&obj->base);
                        serial = mn->serial;
                }
                spin_unlock(&mn->lock);
                if (obj == NULL)
                        return;

                next = cancel_userptr(obj);
        }
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
        .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
        struct i915_mmu_notifier *mn;
        int ret;

        mn = kmalloc(sizeof(*mn), GFP_KERNEL);
        if (mn == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;
        mn->serial = 1;
        INIT_LIST_HEAD(&mn->linear);
        mn->has_linear = false;

        /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
                kfree(mn);
                return ERR_PTR(ret);
        }

        return mn;
}

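/*
 * Bump the notifier's change counter, skipping 0 so that a walker which
 * initialised its local copy to 0 always sees a mismatch and performs a
 * fresh lookup on its first pass.
 */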
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
{
        if (++mn->serial == 0)
                mn->serial = 1;
}

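/*
 * Register a new userptr range with the notifier. Non-overlapping ranges go
 * into the interval tree; if the new range overlaps an existing object we
 * either fall back to the linear list (mn->has_linear) or, while that object
 * still has gup workers outstanding, ask the caller to retry with -EAGAIN.
 */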
static int
i915_mmu_notifier_add(struct drm_device *dev,
                      struct i915_mmu_notifier *mn,
                      struct i915_mmu_object *mo)
{
        struct interval_tree_node *it;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        /* Make sure we drop the final active reference (and thereby
         * remove the objects from the interval tree) before we do
         * the check for overlapping objects.
         */
        i915_gem_retire_requests(dev);

        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects,
                                      mo->it.start, mo->it.last);
        if (it) {
                struct drm_i915_gem_object *obj;

                /* We only need to check the first object in the range as it
                 * either has cancelled gup work queued and we need to
                 * return back to the user to give time for the gup-workers
                 * to flush their object references upon which the object will
                 * be removed from the interval-tree, or the range is
                 * still in use by another client and the overlap is invalid.
                 *
                 * If we do have an overlap, we cannot use the interval tree
                 * for fast range invalidation.
                 */

                obj = container_of(it, struct i915_mmu_object, it)->obj;
                if (!obj->userptr.workers)
                        mn->has_linear = mo->is_linear = true;
                else
                        ret = -EAGAIN;
        } else
                interval_tree_insert(&mo->it, &mn->objects);

        if (ret == 0) {
                list_add(&mo->link, &mn->linear);
                __i915_mmu_notifier_update_serial(mn);
        }
        spin_unlock(&mn->lock);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
{
        struct i915_mmu_object *mo;

        list_for_each_entry(mo, &mn->linear, link)
                if (mo->is_linear)
                        return true;

        return false;
}

static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
                      struct i915_mmu_object *mo)
{
        spin_lock(&mn->lock);
        list_del(&mo->link);
        if (mo->is_linear)
                mn->has_linear = i915_mmu_notifier_has_linear(mn);
        else
                interval_tree_remove(&mo->it, &mn->objects);
        __i915_mmu_notifier_update_serial(mn);
        spin_unlock(&mn->lock);
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
        struct i915_mmu_object *mo;

        mo = obj->userptr.mmu_object;
        if (mo == NULL)
                return;

        i915_mmu_notifier_del(mo->mn, mo);
        kfree(mo);

        obj->userptr.mmu_object = NULL;
}

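/*
 * Return the mm's notifier, creating and registering it on first use.
 * Creation is serialised by dev_priv->mm_lock and runs with mmap_sem held
 * for write, as required by __mmu_notifier_register().
 */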
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
        struct i915_mmu_notifier *mn = mm->mn;

        if (mn)
                return mn;

        down_write(&mm->mm->mmap_sem);
        mutex_lock(&to_i915(mm->dev)->mm_lock);
        if ((mn = mm->mn) == NULL) {
                mn = i915_mmu_notifier_create(mm->mm);
                if (!IS_ERR(mn))
                        mm->mn = mn;
        }
        mutex_unlock(&to_i915(mm->dev)->mm_lock);
        up_write(&mm->mm->mmap_sem);

        return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;
        int ret;

        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

        if (WARN_ON(obj->userptr.mm == NULL))
                return -EINVAL;

        mn = i915_mmu_notifier_find(obj->userptr.mm);
        if (IS_ERR(mn))
                return PTR_ERR(mn);

        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
        if (mo == NULL)
                return -ENOMEM;

        mo->mn = mn;
        mo->it.start = obj->userptr.ptr;
        mo->it.last = mo->it.start + obj->base.size - 1;
        mo->obj = obj;

        ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
        if (ret) {
                kfree(mo);
                return ret;
        }

        obj->userptr.mmu_object = mo;
        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
        if (mn == NULL)
                return;

        mmu_notifier_unregister(&mn->mn, mm);
        kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
                return -ENODEV;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
        struct i915_mm_struct *mm;

        /* Protected by dev_priv->mm_lock */
        hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
                if (mm->mm == real)
                        return mm;

        return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_mm_struct *mm;
        int ret = 0;

        /* During release of the GEM object we hold the struct_mutex. This
         * precludes us from calling mmput() at that time as that may be
         * the last reference and so call exit_mmap(). exit_mmap() will
         * attempt to reap the vma, and if we were holding a GTT mmap
         * would then call drm_gem_vm_close() and attempt to reacquire
         * the struct mutex. So in order to avoid that recursion, we have
         * to defer releasing the mm reference until after we drop the
         * struct_mutex, i.e. we need to schedule a worker to do the clean
         * up.
         */
        mutex_lock(&dev_priv->mm_lock);
        mm = __i915_mm_struct_find(dev_priv, current->mm);
        if (mm == NULL) {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (mm == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                kref_init(&mm->kref);
                mm->dev = obj->base.dev;

                mm->mm = current->mm;
                atomic_inc(&current->mm->mm_count);

                mm->mn = NULL;

                /* Protected by dev_priv->mm_lock */
                hash_add(dev_priv->mm_structs,
                         &mm->node, (unsigned long)mm->mm);
        } else
                kref_get(&mm->kref);

        obj->userptr.mm = mm;
out:
        mutex_unlock(&dev_priv->mm_lock);
        return ret;
}

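/*
 * Teardown path for the per-mm state. __i915_mm_struct_free() is invoked by
 * kref_put_mutex() with dev_priv->mm_lock already held: it unlinks the entry
 * from the hashtable, releases the lock, and defers the mmu_notifier
 * unregister and mmdrop() to a worker for the struct_mutex recursion reason
 * described above.
 */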
static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
        struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
        i915_mmu_notifier_free(mm->mn, mm->mm);
        mmdrop(mm->mm);
        kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
        struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

        /* Protected by dev_priv->mm_lock */
        hash_del(&mm->node);
        mutex_unlock(&to_i915(mm->dev)->mm_lock);

        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
        schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mm == NULL)
                return;

        kref_put_mutex(&obj->userptr.mm->kref,
                       __i915_mm_struct_free,
                       &to_i915(obj->base.dev)->mm_lock);
        obj->userptr.mm = NULL;
}

struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

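/*
 * Build the sg_table describing the pinned pages. When swiotlb is active we
 * keep one page per scatterlist entry rather than coalescing contiguous
 * pages into larger segments, presumably to keep each segment within what
 * the bounce buffers can handle.
 */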
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
        struct scatterlist *sg;
        int ret, n;

        *st = kmalloc(sizeof(**st), GFP_KERNEL);
        if (*st == NULL)
                return -ENOMEM;

        if (swiotlb_active()) {
                ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
                if (ret)
                        goto err;

                for_each_sg((*st)->sgl, sg, num_pages, n)
                        sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
        } else {
                ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
                                                0, num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (ret)
                        goto err;
        }

        return 0;

err:
        kfree(*st);
        *st = NULL;
        return ret;
}

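/*
 * Worker used by i915_gem_userptr_get_pages() when the pages cannot be
 * pinned directly: pin them with get_user_pages() under the creating task's
 * mm, then, back under struct_mutex, either install the resulting sg_table
 * or release the pages again if the work has been cancelled or superseded
 * (obj->userptr.work no longer points at us). The outcome is reported back
 * through obj->userptr.work as an ERR_PTR for the next get_pages attempt.
 */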
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
        struct drm_device *dev = obj->base.dev;
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        ret = -ENOMEM;
        pinned = 0;

        pvec = kmalloc(num_pages*sizeof(struct page *),
                       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        if (pvec == NULL)
                pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;

                down_read(&mm->mmap_sem);
                while (pinned < num_pages) {
                        ret = get_user_pages(work->task, mm,
                                             obj->userptr.ptr + pinned * PAGE_SIZE,
                                             num_pages - pinned,
                                             !obj->userptr.read_only, 0,
                                             pvec + pinned, NULL);
                        if (ret < 0)
                                break;

                        pinned += ret;
                }
                up_read(&mm->mmap_sem);
        }

        mutex_lock(&dev->struct_mutex);
        if (obj->userptr.work != &work->work) {
                ret = 0;
        } else if (pinned == num_pages) {
                ret = st_set_pages(&obj->pages, pvec, num_pages);
                if (ret == 0) {
                        list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
                        pinned = 0;
                }
        }

        obj->userptr.work = ERR_PTR(ret);
        obj->userptr.workers--;
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);

        release_pages(pvec, pinned, 0);
        drm_free_large(pvec);

        put_task_struct(work->task);
        kfree(work);
}

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
         * of rendering... Their loss. If they change the mapping of their
         * pages they need to create a new bo to point to the new vma.
         *
         * However, that still leaves open the possibility of the vma
         * being copied upon fork. Which falls under the same userspace
         * synchronisation issue as a regular bo, except that this time
         * the process may not be expecting that a particular piece of
         * memory is tied to the GPU.
         *
         * Fortunately, we can hook into the mmu_notifier in order to
         * discard the page references prior to anything nasty happening
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */

        pvec = NULL;
        pinned = 0;
        if (obj->userptr.mm->mm == current->mm) {
                pvec = kmalloc(num_pages*sizeof(struct page *),
                               GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
                if (pvec == NULL) {
                        pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
                        if (pvec == NULL)
                                return -ENOMEM;
                }

                pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
                                               !obj->userptr.read_only, pvec);
        }
        if (pinned < num_pages) {
                if (pinned < 0) {
                        ret = pinned;
                        pinned = 0;
                } else {
                        /* Spawn a worker so that we can acquire the
                         * user pages without holding our mutex. Access
                         * to the user pages requires mmap_sem, and we have
                         * a strict lock ordering of mmap_sem, struct_mutex -
                         * we already hold struct_mutex here and so cannot
                         * call gup without encountering a lock inversion.
                         *
                         * Userspace will keep on repeating the operation
                         * (thanks to EAGAIN) until either we hit the fast
                         * path or the worker completes. If the worker is
                         * cancelled or superseded, the task is still run
                         * but the results ignored. (This leads to
                         * complications that we may have a stray object
                         * refcount that we need to be wary of when
                         * checking for existing objects during creation.)
                         * If the worker encounters an error, it reports
                         * that error back to this function through
                         * obj->userptr.work = ERR_PTR.
                         */
                        ret = -EAGAIN;
                        if (obj->userptr.work == NULL &&
                            obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
                                struct get_pages_work *work;

                                work = kmalloc(sizeof(*work), GFP_KERNEL);
                                if (work != NULL) {
                                        obj->userptr.work = &work->work;
                                        obj->userptr.workers++;

                                        work->obj = obj;
                                        drm_gem_object_reference(&obj->base);

                                        work->task = current;
                                        get_task_struct(work->task);

                                        INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
                                        schedule_work(&work->work);
                                } else
                                        ret = -ENOMEM;
                        } else {
                                if (IS_ERR(obj->userptr.work)) {
                                        ret = PTR_ERR(obj->userptr.work);
                                        obj->userptr.work = NULL;
                                }
                        }
                }
        } else {
                ret = st_set_pages(&obj->pages, pvec, num_pages);
                if (ret == 0) {
                        obj->userptr.work = NULL;
                        pinned = 0;
                }
        }

        release_pages(pvec, pinned, 0);
        drm_free_large(pvec);
        return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(obj->userptr.work != NULL);

        if (obj->madv != I915_MADV_WILLNEED)
                obj->dirty = 0;

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
                struct page *page = sg_page(sg);

                if (obj->dirty)
                        set_page_dirty(page);

                mark_page_accessed(page);
                page_cache_release(page);
        }
        obj->dirty = 0;

        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
        i915_gem_userptr_release__mmu_notifier(obj);
        i915_gem_userptr_release__mm_struct(obj);
}

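/*
 * Before the object may be exported via dma-buf, make sure it is covered by
 * an mmu_notifier even if it was created with I915_USERPTR_UNSYNCHRONIZED;
 * passing flags == 0 forces the notifier to be set up.
 */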
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mmu_object)
                return 0;

        return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
        .release = i915_gem_userptr_release,
};

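/*
 * Illustrative userspace sketch (not part of this file): wrapping a
 * page-aligned allocation with the userptr ioctl. Assumes libdrm's
 * drmIoctl() and the uapi definitions from i915_drm.h; error handling
 * omitted.
 *
 *	struct drm_i915_gem_userptr arg = { 0 };
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, size);	(size must be page-aligned too)
 *	arg.user_ptr = (uintptr_t)ptr;
 *	arg.user_size = size;
 *	arg.flags = 0;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		arg.handle now names a GEM object backed by ptr
 */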
/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_userptr *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        if (args->flags & ~(I915_USERPTR_READ_ONLY |
                            I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;

        if (args->user_size > dev_priv->gtt.base.total)
                return -E2BIG;

        if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
                       (char __user *)(unsigned long)args->user_ptr, args->user_size))
                return -EFAULT;

        if (args->flags & I915_USERPTR_READ_ONLY) {
                /* On almost all of the current hw, we cannot tell the GPU that a
                 * page is readonly, so this is just a placeholder in the uAPI.
                 */
                return -ENODEV;
        }

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return -ENOMEM;

        drm_gem_private_object_init(dev, &obj->base, args->user_size);
        i915_gem_object_init(obj, &i915_gem_userptr_ops);
        obj->cache_level = I915_CACHE_LLC;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;

        obj->userptr.ptr = args->user_ptr;
        obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

        /* And keep a pointer to the current->mm for resolving the user pages
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
        ret = i915_gem_userptr_init__mm_struct(obj);
        if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}

int
i915_gem_init_userptr(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        mutex_init(&dev_priv->mm_lock);
        hash_init(dev_priv->mm_structs);
        return 0;
}
839 | } |