/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct list_head linear;
	struct drm_device *dev;
	struct mm_struct *mm;
	struct work_struct work;
	unsigned long count;
	unsigned long serial;
	bool has_linear;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mmu;
	struct interval_tree_node it;
	struct list_head link;
	struct drm_i915_gem_object *obj;
	bool is_linear;
};

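/*
 * Cancel any outstanding get-user-pages work on @obj and unbind it from the
 * GTT so that its backing pages can be released.  Called from the
 * mmu-notifier paths below when the user address range is invalidated;
 * returns the end of the object's user range so the caller can continue
 * walking the invalidated range.
 */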
static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	unsigned long end;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
			int ret = i915_vma_unbind(vma);
			WARN_ON(ret && ret != -EIO);
		}
		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	end = obj->userptr.ptr + obj->base.size;

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return end;
}

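/*
 * Walk the flat list of objects (used once overlapping ranges defeat the
 * interval tree) and cancel every object intersecting [start, end].
 * Called with mn->lock held; the lock is dropped around cancel_userptr()
 * and the walk restarted if the list changed (mn->serial) in the meantime.
 * Returns with mn->lock released.
 */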
static void invalidate_range__linear(struct i915_mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct i915_mmu_object *mmu;
	unsigned long serial;

restart:
	serial = mn->serial;
	list_for_each_entry(mmu, &mn->linear, link) {
		struct drm_i915_gem_object *obj;

		if (mmu->it.last < start || mmu->it.start > end)
			continue;

		obj = mmu->obj;
		drm_gem_object_reference(&obj->base);
		spin_unlock(&mn->lock);

		cancel_userptr(obj);

		spin_lock(&mn->lock);
		if (serial != mn->serial)
			goto restart;
	}

	spin_unlock(&mn->lock);
}

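/*
 * mmu_notifier callback: part of the user's address space is about to be
 * invalidated (unmapped, swapped out, changed by fork, etc.).  Look up every
 * userptr object overlapping the range via the interval tree (or the linear
 * list fallback) and cancel it before the CPU page tables are torn down.
 */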
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it = NULL;
	unsigned long next = start;
	unsigned long serial = 0;

	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
	while (next < end) {
		struct drm_i915_gem_object *obj;

		obj = NULL;
		spin_lock(&mn->lock);
		if (mn->has_linear)
			return invalidate_range__linear(mn, mm, start, end);
		if (serial == mn->serial)
			it = interval_tree_iter_next(it, next, end);
		else
			it = interval_tree_iter_first(&mn->objects, start, end);
		if (it != NULL) {
			obj = container_of(it, struct i915_mmu_object, it)->obj;
			drm_gem_object_reference(&obj->base);
			serial = mn->serial;
		}
		spin_unlock(&mn->lock);
		if (obj == NULL)
			return;

		next = cancel_userptr(obj);
	}
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;

	/* Protected by dev->struct_mutex */
	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
		if (mmu->mm == mm)
			return mmu;

	return NULL;
}

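/*
 * Find the i915_mmu_notifier already registered for this mm, or allocate and
 * register a new one.  Callers hold struct_mutex and the mm's mmap_sem for
 * write; the notifier is refcounted by the callers via mmu->count.
 */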
static struct i915_mmu_notifier *
i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	mmu = __i915_mmu_notifier_lookup(dev, mm);
	if (mmu)
		return mmu;

	mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
	if (mmu == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mmu->lock);
	mmu->dev = dev;
	mmu->mn.ops = &i915_gem_userptr_notifier;
	mmu->mm = mm;
	mmu->objects = RB_ROOT;
	mmu->count = 0;
	mmu->serial = 1;
	INIT_LIST_HEAD(&mmu->linear);
	mmu->has_linear = false;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mmu->mn, mm);
	if (ret) {
		kfree(mmu);
		return ERR_PTR(ret);
	}

	/* Protected by dev->struct_mutex */
	hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
	return mmu;
}

static void
__i915_mmu_notifier_destroy_worker(struct work_struct *work)
{
	struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
	mmu_notifier_unregister(&mmu->mn, mmu->mm);
	kfree(mmu);
}

static void
__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	/* Protected by dev->struct_mutex */
	hash_del(&mmu->node);

	/* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
	 * We enter the function holding struct_mutex, therefore we need
	 * to drop our mutex prior to calling mmu_notifier_unregister in
	 * order to prevent lock inversion (and system-wide deadlock)
	 * between the mmap_sem and struct_mutex. Hence we defer the
	 * unregistration to a workqueue where we hold no locks.
	 */
	INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
	schedule_work(&mmu->work);
}

static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
{
	if (++mmu->serial == 0)
		mmu->serial = 1;
}

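/* True if any tracked object still overlaps another and so still requires
 * the linear list walk instead of the interval tree.
 */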
static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
{
	struct i915_mmu_object *mn;

	list_for_each_entry(mn, &mmu->linear, link)
		if (mn->is_linear)
			return true;

	return false;
}

static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	spin_lock(&mmu->lock);
	list_del(&mn->link);
	if (mn->is_linear)
		mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
	else
		interval_tree_remove(&mn->it, &mmu->objects);
	__i915_mmu_notifier_update_serial(mmu);
	spin_unlock(&mmu->lock);

	/* Protected against _add() by dev->struct_mutex */
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
}

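/*
 * Track a new userptr object in the notifier.  Non-overlapping objects go
 * into the interval tree for fast range lookups; if the new range overlaps
 * an existing object we fall back to the linear list, or return -EAGAIN if
 * the overlap may still be resolved by an in-flight gup worker.
 */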
static int
i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	struct interval_tree_node *it;
	int ret;

	ret = i915_mutex_lock_interruptible(mmu->dev);
	if (ret)
		return ret;

	/* Make sure we drop the final active reference (and thereby
	 * remove the objects from the interval tree) before we do
	 * the check for overlapping objects.
	 */
	i915_gem_retire_requests(mmu->dev);

	spin_lock(&mmu->lock);
	it = interval_tree_iter_first(&mmu->objects,
				      mn->it.start, mn->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued and we need to
		 * return back to the user to give time for the gup-workers
		 * to flush their object references upon which the object will
		 * be removed from the interval-tree, or the range is
		 * still in use by another client and the overlap is invalid.
		 *
		 * If we do have an overlap, we cannot use the interval tree
		 * for fast range invalidation.
		 */

		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!obj->userptr.workers)
			mmu->has_linear = mn->is_linear = true;
		else
			ret = -EAGAIN;
	} else
		interval_tree_insert(&mn->it, &mmu->objects);

	if (ret == 0) {
		list_add(&mn->link, &mmu->linear);
		__i915_mmu_notifier_update_serial(mmu);
	}
	spin_unlock(&mmu->lock);
	mutex_unlock(&mmu->dev->struct_mutex);

	return ret;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mn;

	mn = obj->userptr.mn;
	if (mn == NULL)
		return;

	i915_mmu_notifier_del(mn->mmu, mn);
	obj->userptr.mn = NULL;
}

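/*
 * Set up mmu-notifier tracking for a new userptr object so that the GPU
 * mapping is torn down if the user address range is unmapped or changed.
 * With I915_USERPTR_UNSYNCHRONIZED (CAP_SYS_ADMIN only) the tracking is
 * skipped and synchronisation with the process address space is left
 * entirely to userspace.
 */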
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mmu;
	struct i915_mmu_object *mn;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	down_write(&obj->userptr.mm->mmap_sem);
	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret == 0) {
		mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
		if (!IS_ERR(mmu))
			mmu->count++; /* preemptive add to act as a refcount */
		else
			ret = PTR_ERR(mmu);
		mutex_unlock(&obj->base.dev->struct_mutex);
	}
	up_write(&obj->userptr.mm->mmap_sem);
	if (ret)
		return ret;

	mn = kzalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL) {
		ret = -ENOMEM;
		goto destroy_mmu;
	}

	mn->mmu = mmu;
	mn->it.start = obj->userptr.ptr;
	mn->it.last = mn->it.start + obj->base.size - 1;
	mn->obj = obj;

	ret = i915_mmu_notifier_add(mmu, mn);
	if (ret)
		goto free_mn;

	obj->userptr.mn = mn;
	return 0;

free_mn:
	kfree(mn);
destroy_mmu:
	mutex_lock(&obj->base.dev->struct_mutex);
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
	mutex_unlock(&obj->base.dev->struct_mutex);
	return ret;
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
#endif

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

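/*
 * Build the sg_table backing the object from the pinned user pages.  When
 * swiotlb may be bouncing the DMA we keep one page per scatterlist entry
 * rather than coalescing; otherwise adjacent pages are merged into longer
 * entries by sg_alloc_table_from_pages().
 */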
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

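/*
 * Slow path, run from a workqueue: pin the remaining user pages with
 * get_user_pages() under the originating task's mmap_sem.  On success the
 * pages are installed as the object's backing store; if the work has been
 * cancelled or superseded (obj->userptr.work no longer points at us) the
 * result is simply discarded.
 */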
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(num_pages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm;

		down_read(&mm->mmap_sem);
		while (pinned < num_pages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     num_pages - pinned,
					     !obj->userptr.read_only, 0,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work != &work->work) {
		ret = 0;
	} else if (pinned == num_pages) {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
			pinned = 0;
		}
	}

	obj->userptr.work = ERR_PTR(ret);
	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}

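/*
 * .get_pages backend for userptr objects: try the lockless fast path
 * (__get_user_pages_fast) when called from the owning process; anything not
 * pinned there is handed to a gup worker and -EAGAIN is returned until that
 * worker completes.
 */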
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL)
				return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}
	if (pinned < num_pages) {
		if (pinned < 0) {
			ret = pinned;
			pinned = 0;
		} else {
			/* Spawn a worker so that we can acquire the
			 * user pages without holding our mutex. Access
			 * to the user pages requires mmap_sem, and we have
			 * a strict lock ordering of mmap_sem, struct_mutex -
			 * we already hold struct_mutex here and so cannot
			 * call gup without encountering a lock inversion.
			 *
			 * Userspace will keep on repeating the operation
			 * (thanks to EAGAIN) until either we hit the fast
			 * path or the worker completes. If the worker is
			 * cancelled or superseded, the task is still run
			 * but the results ignored. (This leads to
			 * complications that we may have a stray object
			 * refcount that we need to be wary of when
			 * checking for existing objects during creation.)
			 * If the worker encounters an error, it reports
			 * that error back to this function through
			 * obj->userptr.work = ERR_PTR.
			 */
			ret = -EAGAIN;
			if (obj->userptr.work == NULL &&
			    obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
				struct get_pages_work *work;

				work = kmalloc(sizeof(*work), GFP_KERNEL);
				if (work != NULL) {
					obj->userptr.work = &work->work;
					obj->userptr.workers++;

					work->obj = obj;
					drm_gem_object_reference(&obj->base);

					work->task = current;
					get_task_struct(work->task);

					INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
					schedule_work(&work->work);
				} else
					ret = -ENOMEM;
			} else {
				if (IS_ERR(obj->userptr.work)) {
					ret = PTR_ERR(obj->userptr.work);
					obj->userptr.work = NULL;
				}
			}
		}
	} else {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			obj->userptr.work = NULL;
			pinned = 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);
	return ret;
}

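/*
 * .put_pages backend: release the pinned user pages, writing any GPU dirt
 * back by marking the pages dirty unless the object has been marked
 * DONTNEED by the shrinker.
 */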
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(obj->userptr.work != NULL);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
		struct page *page = sg_page(sg);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);

	if (obj->userptr.mm) {
		mmput(obj->userptr.mm);
		obj->userptr.mm = NULL;
	}
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mn)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};

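/*
 * A minimal userspace sketch of the intended usage.  This is a hedged
 * illustration only: the exact struct layout lives in i915_drm.h and may
 * differ between kernel versions, and "fd" is assumed to be an already
 * open DRM device node.
 *
 *	struct drm_i915_gem_userptr arg = { 0 };
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, size);	// page-aligned, page-sized
 *	arg.user_ptr = (uintptr_t)ptr;
 *	arg.user_size = size;
 *	arg.flags = 0;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		// arg.handle now names a GEM object backed by ptr
 *
 * The handle is then used with execbuffer like any other GEM object.
 */
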
/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (args->user_size > dev_priv->gtt.base.total)
		return -E2BIG;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	/* Allocate the new object */
	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = -ENOMEM;
	if ((obj->userptr.mm = get_task_mm(current)))
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

int
i915_gem_init_userptr(struct drm_device *dev)
{
#if defined(CONFIG_MMU_NOTIFIER)
	struct drm_i915_private *dev_priv = to_i915(dev);
	hash_init(dev_priv->mmu_notifiers);
#endif
	return 0;
}