/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>
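
/*
 * dma-buf (PRIME) support for i915: exports GEM objects as dma-bufs that
 * other devices can attach to, and imports foreign dma-bufs as GEM objects.
 */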

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	mutex_lock(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	/* Reuse an existing kernel mapping if one is already cached. */
	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}
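
/*
 * The two functions above back dma_buf_vmap()/dma_buf_vunmap(): the kernel
 * mapping is built once, cached in obj->dma_buf_vmapping, and refcounted
 * through obj->vmapping_count, so repeated vmaps of the same buffer are cheap.
 */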

/* Page-granular kmap access is not supported; importers must vmap or mmap. */
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	/* Forward the mmap to the object's backing shmem file. */
	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool was_interruptible;
	int ret;

	mutex_lock(&dev->struct_mutex);
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);

	dev_priv->mm.interruptible = was_interruptible;
	mutex_unlock(&dev->struct_mutex);

	if (unlikely(ret))
		DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};
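
/*
 * Sketch of how an importing driver typically reaches the ops above through
 * the generic dma-buf API; "buf" and "importer_dev" are hypothetical
 * placeholders, the dma_buf_* calls are the real kernel interfaces:
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(buf, importer_dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); // .map_dma_buf
 *	// ... program the importing device using sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); // .unmap_dma_buf
 *	dma_buf_detach(buf, attach);
 */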

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}

static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
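
/*
 * Userspace reaches i915_gem_prime_export()/i915_gem_prime_import() through
 * the PRIME ioctls, e.g. via libdrm; a minimal sketch, with hypothetical fds
 * and handles:
 *
 *	int prime_fd;
 *	uint32_t handle;
 *
 *	// Export: GEM handle on the i915 fd -> dma-buf file descriptor.
 *	drmPrimeHandleToFD(i915_fd, gem_handle, DRM_CLOEXEC, &prime_fd);
 *
 *	// Import: dma-buf fd -> GEM handle on another DRM device's fd.
 *	drmPrimeFDToHandle(other_fd, prime_fd, &handle);
 */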