drivers/gpu/drm/i915/i915_gem_userptr.c
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

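/*
 * One i915_mmu_notifier is created for each struct mm_struct that owns
 * userptr objects. It carries an interval tree of i915_mmu_object entries
 * keyed by each object's CPU virtual address range, and a serial counter
 * that is bumped on every tree modification so that a walker which drops
 * mn->lock between objects can tell whether its iterator is still valid.
 */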
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct drm_device *dev;
	struct mm_struct *mm;
	struct work_struct work;
	unsigned long count;
	unsigned long serial;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mmu;
	struct interval_tree_node it;
	struct drm_i915_gem_object *obj;
};

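/*
 * Invalidation callback: for every object whose range overlaps the
 * invalidated interval, cancel any in-flight get_user_pages worker, unbind
 * its VMAs and drop its backing pages. mn->lock is released while we take
 * struct_mutex, so the serial counter tells us whether the interval tree
 * changed underneath us and the lookup has to be restarted.
 */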
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it = NULL;
	unsigned long serial = 0;

	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
	while (start < end) {
		struct drm_i915_gem_object *obj;

		obj = NULL;
		spin_lock(&mn->lock);
		if (serial == mn->serial)
			it = interval_tree_iter_next(it, start, end);
		else
			it = interval_tree_iter_first(&mn->objects, start, end);
		if (it != NULL) {
			obj = container_of(it, struct i915_mmu_object, it)->obj;
			drm_gem_object_reference(&obj->base);
			serial = mn->serial;
		}
		spin_unlock(&mn->lock);
		if (obj == NULL)
			return;

		mutex_lock(&mn->dev->struct_mutex);
		/* Cancel any active worker and force us to re-evaluate gup */
		obj->userptr.work = NULL;

		if (obj->pages != NULL) {
			struct drm_i915_private *dev_priv = to_i915(mn->dev);
			struct i915_vma *vma, *tmp;
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
				int ret = i915_vma_unbind(vma);
				WARN_ON(ret && ret != -EIO);
			}
			WARN_ON(i915_gem_object_put_pages(obj));

			dev_priv->mm.interruptible = was_interruptible;
		}

		start = obj->userptr.ptr + obj->base.size;

		drm_gem_object_unreference(&obj->base);
		mutex_unlock(&mn->dev->struct_mutex);
	}
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;

	/* Protected by dev->struct_mutex */
	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
		if (mmu->mm == mm)
			return mmu;

	return NULL;
}

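/*
 * Look up the notifier registered for this mm, or allocate and register a
 * new one. The caller holds struct_mutex (which serialises the hash table)
 * and the mmap_sem write lock (required for __mmu_notifier_register).
 */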
static struct i915_mmu_notifier *
i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	mmu = __i915_mmu_notifier_lookup(dev, mm);
	if (mmu)
		return mmu;

	mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
	if (mmu == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mmu->lock);
	mmu->dev = dev;
	mmu->mn.ops = &i915_gem_userptr_notifier;
	mmu->mm = mm;
	mmu->objects = RB_ROOT;
	mmu->count = 0;
	mmu->serial = 0;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mmu->mn, mm);
	if (ret) {
		kfree(mmu);
		return ERR_PTR(ret);
	}

	/* Protected by dev->struct_mutex */
	hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
	return mmu;
}

static void
__i915_mmu_notifier_destroy_worker(struct work_struct *work)
{
	struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
	mmu_notifier_unregister(&mmu->mn, mmu->mm);
	kfree(mmu);
}

static void
__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	/* Protected by dev->struct_mutex */
	hash_del(&mmu->node);

	/* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
	 * We enter the function holding struct_mutex, therefore we need
	 * to drop our mutex prior to calling mmu_notifier_unregister in
	 * order to prevent lock inversion (and system-wide deadlock)
	 * between the mmap_sem and struct_mutex. Hence we defer the
	 * unregistration to a workqueue where we hold no locks.
	 */
	INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
	schedule_work(&mmu->work);
}

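/*
 * The range walker in the invalidate callback starts with a local serial of
 * 0 to force an initial tree lookup, so the counter must never wrap back to
 * 0 here.
 */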
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
{
	if (++mmu->serial == 0)
		mmu->serial = 1;
}

static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	spin_lock(&mmu->lock);
	interval_tree_remove(&mn->it, &mmu->objects);
	__i915_mmu_notifier_update_serial(mmu);
	spin_unlock(&mmu->lock);

	/* Protected against _add() by dev->struct_mutex */
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
}

static int
i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	struct interval_tree_node *it;
	int ret;

	ret = i915_mutex_lock_interruptible(mmu->dev);
	if (ret)
		return ret;

	/* Make sure we drop the final active reference (and thereby
	 * remove the objects from the interval tree) before we do
	 * the check for overlapping objects.
	 */
	i915_gem_retire_requests(mmu->dev);

	/* Disallow overlapping userptr objects */
	spin_lock(&mmu->lock);
	it = interval_tree_iter_first(&mmu->objects,
				      mn->it.start, mn->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued and we need to
		 * return back to the user to give time for the gup-workers
		 * to flush their object references upon which the object will
		 * be removed from the interval-tree, or the range is
		 * still in use by another client and the overlap is invalid.
		 */

		obj = container_of(it, struct i915_mmu_object, it)->obj;
		ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
	} else {
		interval_tree_insert(&mn->it, &mmu->objects);
		__i915_mmu_notifier_update_serial(mmu);
		ret = 0;
	}
	spin_unlock(&mmu->lock);
	mutex_unlock(&mmu->dev->struct_mutex);

	return ret;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mn;

	mn = obj->userptr.mn;
	if (mn == NULL)
		return;

	i915_mmu_notifier_del(mn->mmu, mn);
	obj->userptr.mn = NULL;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mmu;
	struct i915_mmu_object *mn;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	down_write(&obj->userptr.mm->mmap_sem);
	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret == 0) {
		mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
		if (!IS_ERR(mmu))
			mmu->count++; /* preemptive add to act as a refcount */
		else
			ret = PTR_ERR(mmu);
		mutex_unlock(&obj->base.dev->struct_mutex);
	}
	up_write(&obj->userptr.mm->mmap_sem);
	if (ret)
		return ret;

	mn = kzalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL) {
		ret = -ENOMEM;
		goto destroy_mmu;
	}

	mn->mmu = mmu;
	mn->it.start = obj->userptr.ptr;
	mn->it.last = mn->it.start + obj->base.size - 1;
	mn->obj = obj;

	ret = i915_mmu_notifier_add(mmu, mn);
	if (ret)
		goto free_mn;

	obj->userptr.mn = mn;
	return 0;

free_mn:
	kfree(mn);
destroy_mmu:
	mutex_lock(&obj->base.dev->struct_mutex);
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
	mutex_unlock(&obj->base.dev->struct_mutex);
	return ret;
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
#endif

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

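/*
 * Build an sg_table for the pinned pages. When swiotlb is active, every page
 * gets its own sg entry rather than being coalesced by
 * sg_alloc_table_from_pages(), so that a bounce-buffered DMA mapping never
 * sees a segment larger than a single page.
 */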
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

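/*
 * Worker that pins the user pages on behalf of i915_gem_userptr_get_pages().
 * It runs without struct_mutex so it may take mmap_sem for get_user_pages(),
 * then retakes struct_mutex to publish the result. If obj->userptr.work no
 * longer points at this work item, the request was cancelled or superseded
 * and the freshly pinned pages are simply released again.
 */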
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(num_pages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm;

		down_read(&mm->mmap_sem);
		while (pinned < num_pages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     num_pages - pinned,
					     !obj->userptr.read_only, 0,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work != &work->work) {
		ret = 0;
	} else if (pinned == num_pages) {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
			pinned = 0;
		}
	}

	obj->userptr.work = ERR_PTR(ret);
	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}

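/*
 * Acquire the backing pages for a userptr object. When called from the
 * process that owns the pages we first try the lockless fast path; if that
 * cannot pin everything (or we are in another process), a worker is spawned
 * and -EAGAIN is returned so that the caller retries once it has completed.
 */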
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL)
				return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}
	if (pinned < num_pages) {
		if (pinned < 0) {
			ret = pinned;
			pinned = 0;
		} else {
			/* Spawn a worker so that we can acquire the
			 * user pages without holding our mutex. Access
			 * to the user pages requires mmap_sem, and we have
			 * a strict lock ordering of mmap_sem, struct_mutex -
			 * we already hold struct_mutex here and so cannot
			 * call gup without encountering a lock inversion.
			 *
			 * Userspace will keep on repeating the operation
			 * (thanks to EAGAIN) until either we hit the fast
			 * path or the worker completes. If the worker is
			 * cancelled or superseded, the task is still run
			 * but the results ignored. (This leads to
			 * complications that we may have a stray object
			 * refcount that we need to be wary of when
			 * checking for existing objects during creation.)
			 * If the worker encounters an error, it reports
			 * that error back to this function through
			 * obj->userptr.work = ERR_PTR.
			 */
			ret = -EAGAIN;
			if (obj->userptr.work == NULL &&
			    obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
				struct get_pages_work *work;

				work = kmalloc(sizeof(*work), GFP_KERNEL);
				if (work != NULL) {
					obj->userptr.work = &work->work;
					obj->userptr.workers++;

					work->obj = obj;
					drm_gem_object_reference(&obj->base);

					work->task = current;
					get_task_struct(work->task);

					INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
					schedule_work(&work->work);
				} else
					ret = -ENOMEM;
			} else {
				if (IS_ERR(obj->userptr.work)) {
					ret = PTR_ERR(obj->userptr.work);
					obj->userptr.work = NULL;
				}
			}
		}
	} else {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			obj->userptr.work = NULL;
			pinned = 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(obj->userptr.work != NULL);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
		struct page *page = sg_page(sg);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);

	if (obj->userptr.mm) {
		mmput(obj->userptr.mm);
		obj->userptr.mm = NULL;
	}
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mn)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It cannot overlap any other userptr object in the same address space.
 * 3. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 4. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 5. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (args->user_size > dev_priv->gtt.base.total)
		return -E2BIG;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	/* Allocate the new object */
	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = -ENOMEM;
	if ((obj->userptr.mm = get_task_mm(current)))
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

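/*
 * Illustrative userspace call sequence (a sketch, not part of this file):
 * the pointer and size must both be page aligned, and the returned handle
 * can then be used like any other GEM handle. use_gem_handle() is a
 * hypothetical helper; drmIoctl() comes from libdrm.
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (uintptr_t)ptr,
 *		.user_size = size,
 *		.flags = 0,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_gem_handle(arg.handle);
 */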
int
i915_gem_init_userptr(struct drm_device *dev)
{
#if defined(CONFIG_MMU_NOTIFIER)
	struct drm_i915_private *dev_priv = to_i915(dev);
	hash_init(dev_priv->mmu_notifiers);
#endif
	return 0;
}