drm/i915/dmabuf: Tighten struct_mutex for unmap_dma_buf
deliverable/linux.git: drivers/gpu/drm/i915/i915_gem_dmabuf.c
/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

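/*
 * Make an independent copy of the object's sg_table and map it for the
 * importer's device. struct_mutex is held while acquiring and pinning
 * the backing pages; the pin lasts for the lifetime of the mapping.
 */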
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

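/*
 * Reverse of i915_gem_map_dma_buf: the importer's copy of the sg_table
 * is unmapped and freed without the lock, and struct_mutex is then
 * taken only for the page unpin, keeping the critical section minimal.
 */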
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	mutex_lock(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

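/*
 * Provide a contiguous kernel virtual mapping of the object's pages.
 * The mapping is cached on the object and refcounted through
 * vmapping_count, so repeated vmap calls reuse the existing mapping.
 */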
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

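/*
 * Drop one reference on the cached kernel mapping; the final vunmap
 * tears the mapping down and unpins the backing pages.
 */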
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

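/*
 * Per-page kernel mappings are not supported for i915 dma-bufs; the
 * kmap hooks below are stubs, and importers needing CPU access should
 * use vmap or mmap instead.
 */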
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

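/*
 * CPU mmap is forwarded to the shmem file backing the object, and the
 * vma's file reference is then swapped over to that file. Objects
 * without a backing filp cannot be mapped and return -ENODEV.
 */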
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

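/*
 * Prepare for CPU access by moving the object into the CPU domain,
 * flushing any pending GPU writes. Bidirectional and to-device
 * transfers imply that the CPU will write to the pages.
 */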
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

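/*
 * Finish CPU access by moving the object back to the GTT domain. The
 * flush must not be aborted part-way, so interruptible waits are
 * disabled for its duration; on failure we can only report the error.
 */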
static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool was_interruptible;
	int ret;

	mutex_lock(&dev->struct_mutex);
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);

	dev_priv->mm.interruptible = was_interruptible;
	mutex_unlock(&dev->struct_mutex);

	if (unlikely(ret))
		DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

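/*
 * Export a GEM object as a dma-buf, giving the object type a chance to
 * prepare for (or refuse) export via its dmabuf_export hook.
 */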
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}

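/*
 * Backing-store hooks for imported dma-bufs: the exporter provides the
 * pages through the attachment, so get_pages/put_pages simply wrap
 * dma_buf_map_attachment() and dma_buf_unmap_attachment().
 */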
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

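/*
 * Import a dma-buf as a GEM object. A buffer that we exported ourselves
 * is short-circuited to a reference on the original object; a foreign
 * buffer is wrapped in a new GEM object backed by the attachment.
 */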
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}