diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 4d30b60defda..2314c88323e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -34,7 +34,7 @@
 
 struct i915_mm_struct {
 	struct mm_struct *mm;
-	struct drm_device *dev;
+	struct drm_i915_private *i915;
 	struct i915_mmu_notifier *mn;
 	struct hlist_node node;
 	struct kref kref;
@@ -49,6 +49,7 @@ struct i915_mmu_notifier {
 	struct hlist_node node;
 	struct mmu_notifier mn;
 	struct rb_root objects;
+	struct workqueue_struct *wq;
 };
 
 struct i915_mmu_object {
@@ -60,6 +61,37 @@ struct i915_mmu_object {
 	bool attached;
 };
 
+static void wait_rendering(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
+	int i, n;
+
+	if (!obj->active)
+		return;
+
+	n = 0;
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct drm_i915_gem_request *req;
+
+		req = obj->last_read_req[i];
+		if (req == NULL)
+			continue;
+
+		requests[n++] = i915_gem_request_reference(req);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	for (i = 0; i < n; i++)
+		__i915_wait_request(requests[i], false, NULL, NULL);
+
+	mutex_lock(&dev->struct_mutex);
+
+	for (i = 0; i < n; i++)
+		i915_gem_request_unreference(requests[i]);
+}
+
 static void cancel_userptr(struct work_struct *work)
 {
 	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
@@ -75,13 +107,13 @@ static void cancel_userptr(struct work_struct *work)
 		struct i915_vma *vma, *tmp;
 		bool was_interruptible;
 
+		wait_rendering(obj);
+
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
-			int ret = i915_vma_unbind(vma);
-			WARN_ON(ret && ret != -EIO);
-		}
+		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
+			WARN_ON(i915_vma_unbind(vma));
 		WARN_ON(i915_gem_object_put_pages(obj));
 
 		dev_priv->mm.interruptible = was_interruptible;
@@ -140,7 +172,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		 */
 		mo = container_of(it, struct i915_mmu_object, it);
 		if (kref_get_unless_zero(&mo->obj->base.refcount))
-			schedule_work(&mo->work);
+			queue_work(mn->wq, &mo->work);
 
 		list_add(&mo->link, &cancelled);
 		it = interval_tree_iter_next(it, start, end);
@@ -148,6 +180,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	list_for_each_entry(mo, &cancelled, link)
 		del_object(mo);
 	spin_unlock(&mn->lock);
+
+	flush_workqueue(mn->wq);
 }
 
 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -167,10 +201,16 @@ i915_mmu_notifier_create(struct mm_struct *mm)
 	spin_lock_init(&mn->lock);
 	mn->mn.ops = &i915_gem_userptr_notifier;
 	mn->objects = RB_ROOT;
+	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+	if (mn->wq == NULL) {
+		kfree(mn);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	/* Protected by mmap_sem (write-lock) */
 	ret = __mmu_notifier_register(&mn->mn, mm);
 	if (ret) {
+		destroy_workqueue(mn->wq);
 		kfree(mn);
 		return ERR_PTR(ret);
 	}
@@ -205,13 +245,13 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
 		return mn;
 
 	down_write(&mm->mm->mmap_sem);
-	mutex_lock(&to_i915(mm->dev)->mm_lock);
+	mutex_lock(&mm->i915->mm_lock);
 	if ((mn = mm->mn) == NULL) {
 		mn = i915_mmu_notifier_create(mm->mm);
 		if (!IS_ERR(mn))
 			mm->mn = mn;
 	}
-	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+	mutex_unlock(&mm->i915->mm_lock);
 	up_write(&mm->mm->mmap_sem);
 
 	return mn;
@@ -256,6 +296,7 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
 		return;
 
 	mmu_notifier_unregister(&mn->mn, mm);
+	destroy_workqueue(mn->wq);
 	kfree(mn);
 }
 
@@ -327,7 +368,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
 		}
 
 		kref_init(&mm->kref);
-		mm->dev = obj->base.dev;
+		mm->i915 = to_i915(obj->base.dev);
 
 		mm->mm = current->mm;
 		atomic_inc(&current->mm->mm_count);
@@ -362,7 +403,7 @@ __i915_mm_struct_free(struct kref *kref)
 
 	/* Protected by dev_priv->mm_lock */
 	hash_del(&mm->node);
-	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+	mutex_unlock(&mm->i915->mm_lock);
 
 	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
 	schedule_work(&mm->work);
@@ -494,10 +535,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	ret = -ENOMEM;
 	pinned = 0;
 
-	pvec = kmalloc(npages*sizeof(struct page *),
-		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-	if (pvec == NULL)
-		pvec = drm_malloc_ab(npages, sizeof(struct page *));
+	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
 
@@ -639,14 +677,11 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	pvec = NULL;
 	pinned = 0;
 	if (obj->userptr.mm->mm == current->mm) {
-		pvec = kmalloc(num_pages*sizeof(struct page *),
-			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+				      GFP_TEMPORARY);
 		if (pvec == NULL) {
-			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
-			if (pvec == NULL) {
-				__i915_gem_userptr_set_active(obj, false);
-				return -ENOMEM;
-			}
+			__i915_gem_userptr_set_active(obj, false);
+			return -ENOMEM;
 		}
 
 		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@@ -671,7 +706,8 @@
 static void
 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 {
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 
 	BUG_ON(obj->userptr.work != NULL);
 	__i915_gem_userptr_set_active(obj, false);
@@ -681,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 
 	i915_gem_gtt_finish_object(obj);
 
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-
+	for_each_sgt_page(page, sgt_iter, obj->pages) {
 		if (obj->dirty)
 			set_page_dirty(page);
 
@@ -763,6 +797,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 	int ret;
 	u32 handle;
 
+	if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
+		/* We cannot support coherent userptr objects on hw without
+		 * LLC and broken snooping.
+		 */
+		return -ENODEV;
+	}
+
 	if (args->flags & ~(I915_USERPTR_READ_ONLY |
 			    I915_USERPTR_UNSYNCHRONIZED))
 		return -EINVAL;
@@ -813,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 	return 0;
 }
 
-int
-i915_gem_init_userptr(struct drm_device *dev)
+void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	mutex_init(&dev_priv->mm_lock);
 	hash_init(dev_priv->mm_structs);
-	return 0;
 }
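
A note on the mmu-notifier hunks above: cancellations are queued on a private unbound workqueue (rather than the system queue via schedule_work()), and the invalidate callback calls flush_workqueue() before returning, so every affected object is guaranteed to have been unbound by the time the notifier unblocks its caller. A minimal kernel-style sketch of this queue-then-flush pattern follows; the struct and function names here are illustrative, not taken from the driver:

	#include <linux/workqueue.h>

	struct deferred_cancel {
		struct workqueue_struct *wq;	/* alloc_workqueue(..., WQ_UNBOUND, 0) */
		struct work_struct work;	/* INIT_WORK(&work, do_cancel) */
	};

	static void do_cancel(struct work_struct *work)
	{
		/* Runs in process context, outside the notifier's spinlock,
		 * so it may sleep (take struct_mutex, wait on the GPU, ...).
		 */
	}

	static void invalidate_range(struct deferred_cancel *c)
	{
		queue_work(c->wq, &c->work);	/* defer the heavy lifting */
		flush_workqueue(c->wq);		/* do not return until it is done */
	}

Using a private queue means flush_workqueue() waits only for these cancellations, not for unrelated work items sharing the system workqueue.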
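The final hunk makes i915_gem_init_userptr() infallible: it now takes struct drm_i915_private directly and returns void, so callers drop their error handling. A rough sketch of the corresponding call-site change, where the surrounding function is a hypothetical stand-in and not part of this diff:

	/* Hypothetical call site, for illustration only. */
	static int example_driver_load(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = to_i915(dev);

		/* Before: ret = i915_gem_init_userptr(dev); if (ret) goto err; */
		i915_gem_init_userptr(dev_priv);

		return 0;
	}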