/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct list_head linear;
	struct drm_device *dev;
	struct mm_struct *mm;
	struct work_struct work;
	unsigned long count;
	unsigned long serial;
	bool has_linear;
};
struct i915_mmu_object {
	struct i915_mmu_notifier *mmu;
	struct interval_tree_node it;
	struct list_head link;
	struct drm_i915_gem_object *obj;
	bool is_linear;
};
static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	unsigned long end;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
			int ret = i915_vma_unbind(vma);
			WARN_ON(ret && ret != -EIO);
		}

		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	end = obj->userptr.ptr + obj->base.size;

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return end;
}
static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end)
{
	struct i915_mmu_object *mmu;
	unsigned long serial;

restart:
	serial = mn->serial;
	list_for_each_entry(mmu, &mn->linear, link) {
		struct drm_i915_gem_object *obj;

		if (mmu->it.last < start || mmu->it.start > end)
			continue;

		obj = mmu->obj;
		drm_gem_object_reference(&obj->base);
		spin_unlock(&mn->lock);

		cancel_userptr(obj);

		spin_lock(&mn->lock);
		if (serial != mn->serial)
			goto restart;
	}

	return NULL;
}
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it = NULL;
	unsigned long next = start;
	unsigned long serial = 0;

	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
	while (next < end) {
		struct drm_i915_gem_object *obj = NULL;

		spin_lock(&mn->lock);
		if (mn->has_linear)
			it = invalidate_range__linear(mn, mm, start, end);
		else if (serial == mn->serial)
			it = interval_tree_iter_next(it, next, end);
		else
			it = interval_tree_iter_first(&mn->objects, start, end);
		if (it != NULL) {
			obj = container_of(it, struct i915_mmu_object, it)->obj;
			drm_gem_object_reference(&obj->base);
			serial = mn->serial;
		}
		spin_unlock(&mn->lock);
		if (obj == NULL)
			return;

		next = cancel_userptr(obj);
	}
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};
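/*
 * Invalidation flow, in short: whenever the core mm tears down or changes a
 * range of the client address space (munmap, fork, swap-out, ...),
 * invalidate_range_start above finds every userptr object overlapping
 * [start, end) - via the interval tree, or via the linear list when
 * overlapping objects exist - and cancel_userptr() unbinds it and drops its
 * page references before the mm proceeds.
 */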
static struct i915_mmu_notifier *
__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;

	/* Protected by dev->struct_mutex */
	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
		if (mmu->mm == mm)
			return mmu;

	return NULL;
}
static struct i915_mmu_notifier *
i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	mmu = __i915_mmu_notifier_lookup(dev, mm);
	if (mmu)
		return mmu;

	mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
	if (mmu == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mmu->lock);
	mmu->dev = dev;
	mmu->mn.ops = &i915_gem_userptr_notifier;
	mmu->mm = mm;
	mmu->objects = RB_ROOT;
	mmu->count = 0;
	mmu->serial = 1;
	INIT_LIST_HEAD(&mmu->linear);
	mmu->has_linear = false;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mmu->mn, mm);
	if (ret) {
		kfree(mmu);
		return ERR_PTR(ret);
	}

	/* Protected by dev->struct_mutex */
	hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
	return mmu;
}
static void
__i915_mmu_notifier_destroy_worker(struct work_struct *work)
{
	struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
	mmu_notifier_unregister(&mmu->mn, mmu->mm);
	kfree(mmu);
}
static void
__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	/* Protected by dev->struct_mutex */
	hash_del(&mmu->node);

	/* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
	 * We enter the function holding struct_mutex, therefore we need
	 * to drop our mutex prior to calling mmu_notifier_unregister in
	 * order to prevent lock inversion (and system-wide deadlock)
	 * between the mmap_sem and struct_mutex. Hence we defer the
	 * unregistration to a workqueue where we hold no locks.
	 */
	INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
	schedule_work(&mmu->work);
}
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
{
	if (++mmu->serial == 0)
		mmu->serial = 1;
}
static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
{
	struct i915_mmu_object *mn;

	list_for_each_entry(mn, &mmu->linear, link)
		if (mn->is_linear)
			return true;

	return false;
}
static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	spin_lock(&mmu->lock);
	list_del(&mn->link);
	if (mn->is_linear)
		mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
	else
		interval_tree_remove(&mn->it, &mmu->objects);
	__i915_mmu_notifier_update_serial(mmu);
	spin_unlock(&mmu->lock);

	/* Protected against _add() by dev->struct_mutex */
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
}
static int
i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	struct interval_tree_node *it;
	int ret;

	ret = i915_mutex_lock_interruptible(mmu->dev);
	if (ret)
		return ret;

	/* Make sure we drop the final active reference (and thereby
	 * remove the objects from the interval tree) before we do
	 * the check for overlapping objects.
	 */
	i915_gem_retire_requests(mmu->dev);

	spin_lock(&mmu->lock);
	it = interval_tree_iter_first(&mmu->objects,
				      mn->it.start, mn->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued and we need to
		 * return back to the user to give time for the gup-workers
		 * to flush their object references upon which the object will
		 * be removed from the interval-tree, or the range is
		 * still in use by another client and the overlap is invalid.
		 *
		 * If we do have an overlap, we cannot use the interval tree
		 * for fast range invalidation.
		 */

		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!obj->userptr.workers)
			mmu->has_linear = mn->is_linear = true;
		else
			ret = -EAGAIN;
	} else
		interval_tree_insert(&mn->it, &mmu->objects);

	if (ret == 0) {
		list_add(&mn->link, &mmu->linear);
		__i915_mmu_notifier_update_serial(mmu);
	}
	spin_unlock(&mmu->lock);
	mutex_unlock(&mmu->dev->struct_mutex);

	return ret;
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mn;

	mn = obj->userptr.mn;
	if (mn == NULL)
		return;

	i915_mmu_notifier_del(mn->mmu, mn);
	obj->userptr.mn = NULL;
	kfree(mn);
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mmu;
	struct i915_mmu_object *mn;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	down_write(&obj->userptr.mm->mmap_sem);
	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret == 0) {
		mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
		if (!IS_ERR(mmu))
			mmu->count++; /* preemptive add to act as a refcount */
		else
			ret = PTR_ERR(mmu);
		mutex_unlock(&obj->base.dev->struct_mutex);
	}
	up_write(&obj->userptr.mm->mmap_sem);
	if (ret)
		return ret;

	mn = kzalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL) {
		ret = -ENOMEM;
		goto destroy_mmu;
	}

	mn->mmu = mmu;
	mn->it.start = obj->userptr.ptr;
	mn->it.last = mn->it.start + obj->base.size - 1;
	mn->obj = obj;

	ret = i915_mmu_notifier_add(mmu, mn);
	if (ret)
		goto free_mn;

	obj->userptr.mn = mn;
	return 0;

free_mn:
	kfree(mn);
destroy_mmu:
	mutex_lock(&obj->base.dev->struct_mutex);
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
	mutex_unlock(&obj->base.dev->struct_mutex);
	return ret;
}
#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
#endif
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};
#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active()	swiotlb_nr_tbl()
#else
#define swiotlb_active()	0
#endif
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(num_pages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm;

		down_read(&mm->mmap_sem);
		while (pinned < num_pages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     num_pages - pinned,
					     !obj->userptr.read_only, 0,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work != &work->work) {
		ret = 0;
	} else if (pinned == num_pages) {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
			pinned = 0;
		}
	}

	obj->userptr.work = ERR_PTR(ret);
	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL)
				return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}
	if (pinned < num_pages) {
		if (pinned < 0) {
			ret = pinned;
			pinned = 0;
		} else {
			/* Spawn a worker so that we can acquire the
			 * user pages without holding our mutex. Access
			 * to the user pages requires mmap_sem, and we have
			 * a strict lock ordering of mmap_sem, struct_mutex -
			 * we already hold struct_mutex here and so cannot
			 * call gup without encountering a lock inversion.
			 *
			 * Userspace will keep on repeating the operation
			 * (thanks to EAGAIN) until either we hit the fast
			 * path or the worker completes. If the worker is
			 * cancelled or superseded, the task is still run
			 * but the results ignored. (This leads to
			 * complications that we may have a stray object
			 * refcount that we need to be wary of when
			 * checking for existing objects during creation.)
			 * If the worker encounters an error, it reports
			 * that error back to this function through
			 * obj->userptr.work = ERR_PTR.
			 */
			ret = -EAGAIN;
			if (obj->userptr.work == NULL &&
			    obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
				struct get_pages_work *work;

				work = kmalloc(sizeof(*work), GFP_KERNEL);
				if (work != NULL) {
					obj->userptr.work = &work->work;
					obj->userptr.workers++;

					work->obj = obj;
					drm_gem_object_reference(&obj->base);

					work->task = current;
					get_task_struct(work->task);

					INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
					schedule_work(&work->work);
				} else
					ret = -ENOMEM;
			} else {
				if (IS_ERR(obj->userptr.work)) {
					ret = PTR_ERR(obj->userptr.work);
					obj->userptr.work = NULL;
				}
			}
		}
	} else {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			obj->userptr.work = NULL;
			pinned = 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);
	return ret;
}
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(obj->userptr.work != NULL);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
		struct page *page = sg_page(sg);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);

	if (obj->userptr.mm) {
		mmput(obj->userptr.mm);
		obj->userptr.mm = NULL;
	}
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mn)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};
/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (args->user_size > dev_priv->gtt.base.total)
		return -E2BIG;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	/* Allocate the new object */
	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = -ENOMEM;
	if ((obj->userptr.mm = get_task_mm(current)))
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
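/*
 * A minimal userspace sketch of the ioctl above (not part of the driver;
 * it assumes libdrm's drmIoctl(), the struct drm_i915_gem_userptr and
 * DRM_IOCTL_I915_GEM_USERPTR definitions from the uapi i915_drm.h, and a
 * hypothetical use_handle() consumer):
 *
 *	struct drm_i915_gem_userptr arg = { 0 };
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, 65536);	// page-aligned start and size
 *	arg.user_ptr = (uintptr_t)ptr;
 *	arg.user_size = 65536;
 *	arg.flags = 0;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_handle(arg.handle);	// GEM handle backed by ptr's pages
 */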
int
i915_gem_init_userptr(struct drm_device *dev)
{
#if defined(CONFIG_MMU_NOTIFIER)
	struct drm_i915_private *dev_priv = to_i915(dev);
	hash_init(dev_priv->mmu_notifiers);
#endif
	return 0;
}