/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
28 #include <linux/dma-buf.h>
30 static struct sg_table
*i915_gem_map_dma_buf(struct dma_buf_attachment
*attachment
,
31 enum dma_data_direction dir
)
33 struct drm_i915_gem_object
*obj
= attachment
->dmabuf
->priv
;
34 struct drm_device
*dev
= obj
->base
.dev
;
35 int npages
= obj
->base
.size
/ PAGE_SIZE
;
40 ret
= i915_mutex_lock_interruptible(dev
);
44 ret
= i915_gem_object_get_pages_gtt(obj
);
50 /* link the pages into an SG then map the sg */
51 sg
= drm_prime_pages_to_sg(obj
->pages
, npages
);
52 nents
= dma_map_sg(attachment
->dev
, sg
->sgl
, sg
->nents
, dir
);
54 mutex_unlock(&dev
->struct_mutex
);
58 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment
*attachment
,
59 struct sg_table
*sg
, enum dma_data_direction dir
)
61 dma_unmap_sg(attachment
->dev
, sg
->sgl
, sg
->nents
, dir
);
66 static void i915_gem_dmabuf_release(struct dma_buf
*dma_buf
)
68 struct drm_i915_gem_object
*obj
= dma_buf
->priv
;
70 if (obj
->base
.export_dma_buf
== dma_buf
) {
71 /* drop the reference on the export fd holds */
72 obj
->base
.export_dma_buf
= NULL
;
73 drm_gem_object_unreference_unlocked(&obj
->base
);
77 static void *i915_gem_dmabuf_vmap(struct dma_buf
*dma_buf
)
79 struct drm_i915_gem_object
*obj
= dma_buf
->priv
;
80 struct drm_device
*dev
= obj
->base
.dev
;
83 ret
= i915_mutex_lock_interruptible(dev
);
87 if (obj
->dma_buf_vmapping
) {
88 obj
->vmapping_count
++;
92 ret
= i915_gem_object_get_pages_gtt(obj
);
94 mutex_unlock(&dev
->struct_mutex
);
98 obj
->dma_buf_vmapping
= vmap(obj
->pages
, obj
->base
.size
/ PAGE_SIZE
, 0, PAGE_KERNEL
);
99 if (!obj
->dma_buf_vmapping
) {
100 DRM_ERROR("failed to vmap object\n");
104 obj
->vmapping_count
= 1;
106 mutex_unlock(&dev
->struct_mutex
);
107 return obj
->dma_buf_vmapping
;
110 static void i915_gem_dmabuf_vunmap(struct dma_buf
*dma_buf
, void *vaddr
)
112 struct drm_i915_gem_object
*obj
= dma_buf
->priv
;
113 struct drm_device
*dev
= obj
->base
.dev
;
116 ret
= i915_mutex_lock_interruptible(dev
);
120 --obj
->vmapping_count
;
121 if (obj
->vmapping_count
== 0) {
122 vunmap(obj
->dma_buf_vmapping
);
123 obj
->dma_buf_vmapping
= NULL
;
125 mutex_unlock(&dev
->struct_mutex
);
128 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf
*dma_buf
, unsigned long page_num
)
/* No-op: kmap_atomic is unsupported, so there is nothing to unmap. */
static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
137 static void *i915_gem_dmabuf_kmap(struct dma_buf
*dma_buf
, unsigned long page_num
)
/* No-op: kmap is unsupported, so there is nothing to unmap. */
static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
147 static int i915_gem_dmabuf_mmap(struct dma_buf
*dma_buf
, struct vm_area_struct
*vma
)
152 static int i915_gem_begin_cpu_access(struct dma_buf
*dma_buf
, size_t start
, size_t length
, enum dma_data_direction direction
)
154 struct drm_i915_gem_object
*obj
= dma_buf
->priv
;
155 struct drm_device
*dev
= obj
->base
.dev
;
157 bool write
= (direction
== DMA_BIDIRECTIONAL
|| direction
== DMA_TO_DEVICE
);
159 ret
= i915_mutex_lock_interruptible(dev
);
163 ret
= i915_gem_object_set_to_cpu_domain(obj
, write
);
164 mutex_unlock(&dev
->struct_mutex
);
168 static const struct dma_buf_ops i915_dmabuf_ops
= {
169 .map_dma_buf
= i915_gem_map_dma_buf
,
170 .unmap_dma_buf
= i915_gem_unmap_dma_buf
,
171 .release
= i915_gem_dmabuf_release
,
172 .kmap
= i915_gem_dmabuf_kmap
,
173 .kmap_atomic
= i915_gem_dmabuf_kmap_atomic
,
174 .kunmap
= i915_gem_dmabuf_kunmap
,
175 .kunmap_atomic
= i915_gem_dmabuf_kunmap_atomic
,
176 .mmap
= i915_gem_dmabuf_mmap
,
177 .vmap
= i915_gem_dmabuf_vmap
,
178 .vunmap
= i915_gem_dmabuf_vunmap
,
179 .begin_cpu_access
= i915_gem_begin_cpu_access
,
182 struct dma_buf
*i915_gem_prime_export(struct drm_device
*dev
,
183 struct drm_gem_object
*gem_obj
, int flags
)
185 struct drm_i915_gem_object
*obj
= to_intel_bo(gem_obj
);
187 return dma_buf_export(obj
, &i915_dmabuf_ops
,
188 obj
->base
.size
, 0600);
191 struct drm_gem_object
*i915_gem_prime_import(struct drm_device
*dev
,
192 struct dma_buf
*dma_buf
)
194 struct dma_buf_attachment
*attach
;
196 struct drm_i915_gem_object
*obj
;
201 /* is this one of own objects? */
202 if (dma_buf
->ops
== &i915_dmabuf_ops
) {
204 /* is it from our device? */
205 if (obj
->base
.dev
== dev
) {
206 drm_gem_object_reference(&obj
->base
);
212 attach
= dma_buf_attach(dma_buf
, dev
->dev
);
214 return ERR_CAST(attach
);
216 sg
= dma_buf_map_attachment(attach
, DMA_BIDIRECTIONAL
);
222 size
= dma_buf
->size
;
223 npages
= size
/ PAGE_SIZE
;
225 obj
= kzalloc(sizeof(*obj
), GFP_KERNEL
);
231 ret
= drm_gem_private_object_init(dev
, &obj
->base
, size
);
238 obj
->base
.import_attach
= attach
;
243 dma_buf_unmap_attachment(attach
, sg
, DMA_BIDIRECTIONAL
);
245 dma_buf_detach(dma_buf
, attach
);