/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
        void                            *vaddr;
        struct frame_vector             *vec;
        enum dma_data_direction         dma_dir;
        unsigned long                   size;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;
        struct dma_buf                  *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

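/*
 * Allocate a vmalloc buffer of @size bytes and take the initial
 * reference. Returns the private buffer structure or ERR_PTR(-ENOMEM).
 */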
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
                               unsigned long size, enum dma_data_direction dma_dir,
                               gfp_t gfp_flags)
{
        struct vb2_vmalloc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->size = size;
        buf->vaddr = vmalloc_user(buf->size);
        buf->dma_dir = dma_dir;
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_vmalloc_put;
        buf->handler.arg = buf;

        if (!buf->vaddr) {
                pr_debug("vmalloc of size %lu failed\n", buf->size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        atomic_inc(&buf->refcount);
        return buf;
}

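/*
 * Drop a reference on a MMAP buffer; frees the vmalloc area and the
 * private structure when the last reference goes away.
 */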
static void vb2_vmalloc_put(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (atomic_dec_and_test(&buf->refcount)) {
                vfree(buf->vaddr);
                kfree(buf);
        }
}

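/*
 * Pin a USERPTR range and make it kernel-addressable: ranges backed by
 * regular pages are mapped with vm_map_ram(), while PFN-only memory
 * (e.g. a VM_PFNMAP region) is accepted only if physically contiguous
 * and is then ioremapped.
 */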
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
                                     unsigned long size,
                                     enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;
        struct frame_vector *vec;
        int n_pages, offset, i;
        int ret = -ENOMEM;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dma_dir = dma_dir;
        offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_pfnvec_create;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        if (frame_vector_to_pages(vec) < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * We cannot get page pointers for these pfns. Check that the
                 * memory is physically contiguous and use a direct mapping.
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_map;
                buf->vaddr = (__force void *)
                        ioremap_nocache(nums[0] << PAGE_SHIFT, size);
        } else {
                buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
                                        PAGE_KERNEL);
        }

        if (!buf->vaddr)
                goto fail_map;
        buf->vaddr += offset;
        return buf;

fail_map:
        vb2_destroy_framevec(vec);
fail_pfnvec_create:
        kfree(buf);

        return ERR_PTR(ret);
}

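/*
 * Undo vb2_vmalloc_get_userptr(): tear down the kernel mapping, mark the
 * pages dirty for capture (DMA_FROM_DEVICE) buffers and release the
 * frame vector.
 */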
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
        unsigned int i;
        struct page **pages;
        unsigned int n_pages;

        if (!buf->vec->is_pfns) {
                n_pages = frame_vector_count(buf->vec);
                pages = frame_vector_pages(buf->vec);
                if (vaddr)
                        vm_unmap_ram((void *)vaddr, n_pages);
                if (buf->dma_dir == DMA_FROM_DEVICE)
                        for (i = 0; i < n_pages; i++)
                                set_page_dirty_lock(pages[i]);
        } else {
                iounmap((__force void __iomem *)buf->vaddr);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

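/* Return the kernel virtual address of the buffer, if one exists. */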
static void *vb2_vmalloc_vaddr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (!buf->vaddr) {
                pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
                return NULL;
        }

        return buf->vaddr;
}

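/* Report how many users (mappings, dma-buf exports, ...) hold the buffer. */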
static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

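/*
 * Map a MMAP buffer into userspace and hook up the common refcounting
 * vm_operations so the buffer stays alive while it is mapped.
 */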
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No memory to map\n");
                return -EINVAL;
        }

        ret = remap_vmalloc_range(vma, buf->vaddr, 0);
        if (ret) {
                pr_err("Remapping vmalloc memory, error: %d\n", ret);
                return ret;
        }

        /*
         * Make sure that vm_areas for 2 buffers won't be merged together
         */
        vma->vm_flags |= VM_DONTEXPAND;

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

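/*
 * Build a scatterlist describing the vmalloc'ed pages for a new
 * attachment; the actual DMA mapping is deferred to the map callback.
 */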
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_vmalloc_attachment *attach;
        struct vb2_vmalloc_buf *buf = dbuf->priv;
        int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
        struct sg_table *sgt;
        struct scatterlist *sg;
        void *vaddr = buf->vaddr;
        int ret;
        int i;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return ret;
        }
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *page = vmalloc_to_page(vaddr);

                if (!page) {
                        sg_free_table(sgt);
                        kfree(attach);
                        return -ENOMEM;
                }
                sg_set_page(sg, page, PAGE_SIZE, 0);
                vaddr += PAGE_SIZE;
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;
        return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

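/*
 * Map the cached scatterlist for DMA in the requested direction,
 * reusing a previous mapping when the direction is unchanged.
 */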
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_vmalloc_get_dmabuf */
        vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .attach = vb2_vmalloc_dmabuf_ops_attach,
        .detach = vb2_vmalloc_dmabuf_ops_detach,
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .kmap = vb2_vmalloc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
};

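/*
 * Export the buffer as a dma-buf; the exported buffer holds its own
 * reference, dropped again in the dma-buf release callback.
 */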
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_vmalloc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->vaddr))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

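/* Create a kernel virtual mapping of an attached dma-buf. */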
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        buf->vaddr = dma_buf_vmap(buf->dbuf);

        return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        dma_buf_vunmap(buf->dbuf, buf->vaddr);
        buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        if (buf->vaddr)
                dma_buf_vunmap(buf->dbuf, buf->vaddr);

        kfree(buf);
}

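/*
 * Attach to an externally allocated dma-buf; this only stores the
 * handle, the mapping happens later in vb2_vmalloc_map_dmabuf().
 */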
static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dbuf = dbuf;
        buf->dma_dir = dma_dir;
        buf->size = size;

        return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
        .alloc = vb2_vmalloc_alloc,
        .put = vb2_vmalloc_put,
        .get_userptr = vb2_vmalloc_get_userptr,
        .put_userptr = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
        .get_dmabuf = vb2_vmalloc_get_dmabuf,
#endif
        .map_dmabuf = vb2_vmalloc_map_dmabuf,
        .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
        .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
        .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
        .vaddr = vb2_vmalloc_vaddr,
        .mmap = vb2_vmalloc_mmap,
        .num_users = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");