/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

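/* Recover the GEM object backing a dma-buf that we exported ourselves. */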
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

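/*
 * Pin the object's backing pages and hand the importing device an
 * independent copy of the scatterlist, mapped for DMA through that
 * device. Copying the table keeps the importer's mapping decoupled
 * from the object's own sg_table.
 */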
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

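/* Undo i915_gem_map_dma_buf(): unmap, free the copied sg_table and unpin. */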
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}

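/*
 * Build (or reuse) a contiguous kernel virtual mapping of the object's
 * backing pages. The mapping is refcounted via vmapping_count, so
 * repeated vmap calls share a single mapping.
 */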
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto error;

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto error;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto error;

	obj->vmapping_count = 1;
	i915_gem_object_pin_pages(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

error:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

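/* Drop one vmap reference; the last one tears down the mapping and unpins. */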
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return;

	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

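/*
 * Page-level kmap and direct CPU mmap of the dma-buf are not supported;
 * importers must go through the sg_table or vmap interfaces instead.
 */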
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

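/*
 * Move the object into the CPU domain before the importer touches it,
 * treating bidirectional and to-device transfers as CPU writes.
 */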
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
};

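/* Export a GEM object as a dma-buf, using the ops table above. */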
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
}

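/*
 * For imported objects the backing pages come from the exporter:
 * map the attachment and use the resulting sg_table directly.
 */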
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	obj->has_dma_mapping = true;
	return 0;
}

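/* Release the exporter's sg_table once the pages are no longer needed. */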
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
	obj->has_dma_mapping = false;
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

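/*
 * Import a dma-buf as a GEM object. A buffer we exported ourselves is
 * short-circuited back to its original object; anything foreign gets a
 * new object whose pages are fetched through the attachment on demand.
 */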
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}