Commit | Line | Data |
---|---|---|
b2df26c1 ID |
1 | /* exynos_drm_dmabuf.c |
2 | * | |
3 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | |
4 | * Author: Inki Dae <inki.dae@samsung.com> | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the "Software"), | |
8 | * to deal in the Software without restriction, including without limitation | |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
10 | * and/or sell copies of the Software, and to permit persons to whom the | |
11 | * Software is furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice (including the next | |
14 | * paragraph) shall be included in all copies or substantial portions of the | |
15 | * Software. | |
16 | * | |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
20 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
21 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
22 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
23 | * OTHER DEALINGS IN THE SOFTWARE. | |
24 | */ | |
25 | ||
26 | #include "drmP.h" | |
27 | #include "drm.h" | |
47fcdce2 | 28 | #include "exynos_drm.h" |
b2df26c1 ID |
29 | #include "exynos_drm_drv.h" |
30 | #include "exynos_drm_gem.h" | |
31 | ||
32 | #include <linux/dma-buf.h> | |
33 | ||
34 | static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages, | |
35 | unsigned int page_size) | |
36 | { | |
37 | struct sg_table *sgt = NULL; | |
38 | struct scatterlist *sgl; | |
39 | int i, ret; | |
40 | ||
41 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | |
42 | if (!sgt) | |
43 | goto out; | |
44 | ||
45 | ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL); | |
46 | if (ret) | |
47 | goto err_free_sgt; | |
48 | ||
49 | if (page_size < PAGE_SIZE) | |
50 | page_size = PAGE_SIZE; | |
51 | ||
52 | for_each_sg(sgt->sgl, sgl, nr_pages, i) | |
53 | sg_set_page(sgl, pages[i], page_size, 0); | |
54 | ||
55 | return sgt; | |
56 | ||
57 | err_free_sgt: | |
58 | kfree(sgt); | |
59 | sgt = NULL; | |
60 | out: | |
61 | return NULL; | |
62 | } | |
63 | ||
64 | static struct sg_table * | |
65 | exynos_gem_map_dma_buf(struct dma_buf_attachment *attach, | |
66 | enum dma_data_direction dir) | |
67 | { | |
68 | struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv; | |
69 | struct drm_device *dev = gem_obj->base.dev; | |
70 | struct exynos_drm_gem_buf *buf; | |
71 | struct sg_table *sgt = NULL; | |
72 | unsigned int npages; | |
73 | int nents; | |
74 | ||
75 | DRM_DEBUG_PRIME("%s\n", __FILE__); | |
76 | ||
77 | mutex_lock(&dev->struct_mutex); | |
78 | ||
79 | buf = gem_obj->buffer; | |
80 | ||
81 | /* there should always be pages allocated. */ | |
82 | if (!buf->pages) { | |
83 | DRM_ERROR("pages is null.\n"); | |
84 | goto err_unlock; | |
85 | } | |
86 | ||
87 | npages = buf->size / buf->page_size; | |
88 | ||
89 | sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size); | |
56fb5380 SP |
90 | if (!sgt) { |
91 | DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n"); | |
92 | goto err_unlock; | |
93 | } | |
b2df26c1 ID |
94 | nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir); |
95 | ||
96 | DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n", | |
97 | npages, buf->size, buf->page_size); | |
98 | ||
99 | err_unlock: | |
100 | mutex_unlock(&dev->struct_mutex); | |
101 | return sgt; | |
102 | } | |
103 | ||
104 | static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach, | |
105 | struct sg_table *sgt, | |
106 | enum dma_data_direction dir) | |
107 | { | |
108 | dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); | |
109 | sg_free_table(sgt); | |
110 | kfree(sgt); | |
111 | sgt = NULL; | |
112 | } | |
113 | ||
114 | static void exynos_dmabuf_release(struct dma_buf *dmabuf) | |
115 | { | |
116 | struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv; | |
117 | ||
118 | DRM_DEBUG_PRIME("%s\n", __FILE__); | |
119 | ||
120 | /* | |
121 | * exynos_dmabuf_release() call means that file object's | |
122 | * f_count is 0 and it calls drm_gem_object_handle_unreference() | |
123 | * to drop the references that these values had been increased | |
124 | * at drm_prime_handle_to_fd() | |
125 | */ | |
126 | if (exynos_gem_obj->base.export_dma_buf == dmabuf) { | |
127 | exynos_gem_obj->base.export_dma_buf = NULL; | |
128 | ||
129 | /* | |
130 | * drop this gem object refcount to release allocated buffer | |
131 | * and resources. | |
132 | */ | |
133 | drm_gem_object_unreference_unlocked(&exynos_gem_obj->base); | |
134 | } | |
135 | } | |
136 | ||
/*
 * dma_buf_ops.kmap_atomic callback: atomic CPU kernel mapping of one page.
 * Not implemented for exynos yet; returning NULL signals no mapping.
 */
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */

	return NULL;
}
144 | ||
/*
 * dma_buf_ops.kunmap_atomic callback: counterpart to kmap_atomic above.
 * No-op since no atomic mapping is ever handed out.
 */
static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num,
					void *addr)
{
	/* TODO */
}
151 | ||
/*
 * dma_buf_ops.kmap callback: non-atomic CPU kernel mapping of one page.
 * Not implemented for exynos yet; returning NULL signals no mapping.
 */
static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				unsigned long page_num)
{
	/* TODO */

	return NULL;
}
159 | ||
/*
 * dma_buf_ops.kunmap callback: counterpart to kmap above.
 * No-op since no mapping is ever handed out.
 */
static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				unsigned long page_num, void *addr)
{
	/* TODO */
}
165 | ||
/*
 * dma_buf_ops.mmap callback: userspace mmap of the dma-buf.
 * Deliberately unsupported; -ENOTTY tells importers this exporter
 * provides no CPU mmap.
 */
static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
	struct vm_area_struct *vma)
{
	return -ENOTTY;
}
171 | ||
/*
 * dma-buf operation table passed to dma_buf_export() for exynos GEM
 * objects.  kmap/kmap_atomic and mmap are stubs (see above); only
 * map/unmap_dma_buf and release are functional.
 */
static struct dma_buf_ops exynos_dmabuf_ops = {
	.map_dma_buf = exynos_gem_map_dma_buf,
	.unmap_dma_buf = exynos_gem_unmap_dma_buf,
	.kmap = exynos_gem_dmabuf_kmap,
	.kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
	.kunmap = exynos_gem_dmabuf_kunmap,
	.kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
	.mmap = exynos_gem_dmabuf_mmap,
	.release = exynos_dmabuf_release,
};
182 | ||
183 | struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, | |
184 | struct drm_gem_object *obj, int flags) | |
185 | { | |
186 | struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); | |
187 | ||
188 | return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops, | |
189 | exynos_gem_obj->base.size, 0600); | |
190 | } | |
191 | ||
192 | struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, | |
193 | struct dma_buf *dma_buf) | |
194 | { | |
195 | struct dma_buf_attachment *attach; | |
196 | struct sg_table *sgt; | |
197 | struct scatterlist *sgl; | |
198 | struct exynos_drm_gem_obj *exynos_gem_obj; | |
199 | struct exynos_drm_gem_buf *buffer; | |
200 | struct page *page; | |
47fcdce2 | 201 | int ret; |
b2df26c1 ID |
202 | |
203 | DRM_DEBUG_PRIME("%s\n", __FILE__); | |
204 | ||
205 | /* is this one of own objects? */ | |
206 | if (dma_buf->ops == &exynos_dmabuf_ops) { | |
207 | struct drm_gem_object *obj; | |
208 | ||
209 | exynos_gem_obj = dma_buf->priv; | |
210 | obj = &exynos_gem_obj->base; | |
211 | ||
212 | /* is it from our device? */ | |
213 | if (obj->dev == drm_dev) { | |
214 | drm_gem_object_reference(obj); | |
215 | return obj; | |
216 | } | |
217 | } | |
218 | ||
219 | attach = dma_buf_attach(dma_buf, drm_dev->dev); | |
220 | if (IS_ERR(attach)) | |
221 | return ERR_PTR(-EINVAL); | |
222 | ||
223 | ||
224 | sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); | |
0dd3b72c | 225 | if (IS_ERR_OR_NULL(sgt)) { |
b2df26c1 ID |
226 | ret = PTR_ERR(sgt); |
227 | goto err_buf_detach; | |
228 | } | |
229 | ||
230 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); | |
231 | if (!buffer) { | |
232 | DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n"); | |
233 | ret = -ENOMEM; | |
234 | goto err_unmap_attach; | |
235 | } | |
236 | ||
237 | buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL); | |
238 | if (!buffer->pages) { | |
239 | DRM_ERROR("failed to allocate pages.\n"); | |
240 | ret = -ENOMEM; | |
241 | goto err_free_buffer; | |
242 | } | |
243 | ||
244 | exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size); | |
245 | if (!exynos_gem_obj) { | |
246 | ret = -ENOMEM; | |
247 | goto err_free_pages; | |
248 | } | |
249 | ||
250 | sgl = sgt->sgl; | |
b2df26c1 | 251 | |
47fcdce2 ID |
252 | if (sgt->nents == 1) { |
253 | buffer->dma_addr = sg_dma_address(sgt->sgl); | |
254 | buffer->size = sg_dma_len(sgt->sgl); | |
255 | ||
256 | /* always physically continuous memory if sgt->nents is 1. */ | |
257 | exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; | |
258 | } else { | |
259 | unsigned int i = 0; | |
260 | ||
261 | buffer->dma_addr = sg_dma_address(sgl); | |
262 | while (i < sgt->nents) { | |
263 | buffer->pages[i] = sg_page(sgl); | |
264 | buffer->size += sg_dma_len(sgl); | |
265 | sgl = sg_next(sgl); | |
266 | i++; | |
267 | } | |
268 | ||
269 | exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; | |
b2df26c1 ID |
270 | } |
271 | ||
272 | exynos_gem_obj->buffer = buffer; | |
273 | buffer->sgt = sgt; | |
274 | exynos_gem_obj->base.import_attach = attach; | |
275 | ||
276 | DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr, | |
277 | buffer->size); | |
278 | ||
279 | return &exynos_gem_obj->base; | |
280 | ||
281 | err_free_pages: | |
282 | kfree(buffer->pages); | |
283 | buffer->pages = NULL; | |
284 | err_free_buffer: | |
285 | kfree(buffer); | |
286 | buffer = NULL; | |
287 | err_unmap_attach: | |
288 | dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); | |
289 | err_buf_detach: | |
290 | dma_buf_detach(dma_buf, attach); | |
291 | return ERR_PTR(ret); | |
292 | } | |
293 | ||
294 | MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); | |
295 | MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module"); | |
296 | MODULE_LICENSE("GPL"); |