drm/i915: Refactor duplicate object vmap functions
drivers/gpu/drm/i915/i915_gem_dmabuf.c
/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

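/*
 * Export callback: pin the object's backing pages, duplicate its
 * scatterlist so the importer gets an independent mapping, and DMA-map
 * the copy for the importer's device.
 */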
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

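/*
 * Undo i915_gem_map_dma_buf(): unmap and free the importer's
 * scatterlist copy, then drop our pin on the backing pages.
 */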
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	mutex_lock(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

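/*
 * The GEM and dmabuf vmap paths now share i915_gem_object_pin_map(),
 * which pins the pages and returns a kernel virtual mapping, or an
 * ERR_PTR on failure, under struct_mutex.
 */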
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	void *addr;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	addr = i915_gem_object_pin_map(obj);
	mutex_unlock(&dev->struct_mutex);

	return addr;
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_object_unpin_map(obj);
	mutex_unlock(&dev->struct_mutex);
}

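/*
 * Per-page kernel mappings are not implemented; returning NULL leaves
 * vmap and mmap as the supported CPU access paths.
 */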
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

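/*
 * Delegate mmap to the object's shmem backing file, then swap the
 * vma's file reference from the dmabuf file over to the shmem file so
 * the correct reference is dropped on unmap.
 */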
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

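/*
 * Move the object to the CPU domain before the importer touches the
 * pages; DMA_TO_DEVICE and DMA_BIDIRECTIONAL imply the CPU will write.
 */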
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

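/*
 * Flush CPU writes back by returning the object to the GTT domain.
 * This callback cannot report failure, so the transition is made
 * uninterruptible and any error is only logged.
 */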
static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool was_interruptible;
	int ret;

	mutex_lock(&dev->struct_mutex);
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);

	dev_priv->mm.interruptible = was_interruptible;
	mutex_unlock(&dev->struct_mutex);

	if (unlikely(ret))
		DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

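/*
 * Wrap a GEM object in a dma-buf for export, giving the object's
 * backend a chance to reject the export via its dmabuf_export hook.
 */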
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}

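/*
 * For imported objects the backing pages are not ours to allocate:
 * they are fetched from (and returned to) the exporter through the
 * dma-buf attachment.
 */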
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

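/*
 * Import a dma-buf: if it was exported by this device, reuse the
 * underlying GEM object directly; otherwise attach to the foreign
 * buffer and wrap it in a new GEM object backed by the attachment.
 */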
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM
			 * increases the refcount on the GEM itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}