[media] v4l: vb2: add buffer exporting via dmabuf
drivers/media/v4l2-core/videobuf2-dma-contig.c
/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

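/*
 * Invoke cb() on every page backing the scatterlist, covering each page
 * of multi-page entries individually.
 */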
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

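/*
 * Walk the DMA-mapped scatterlist and return the length of the initial
 * run of entries whose bus addresses are contiguous.
 */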
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

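/*
 * Cache synchronisation before and after DMA. For DMABUF-attached
 * buffers the exporter takes care of cache maintenance, so both
 * callbacks become no-ops.
 */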
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	kfree(buf);
}

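/*
 * Allocate a physically contiguous, coherent buffer and set up the
 * refcounted vma handler that keeps it alive while userspace mappings
 * exist.
 */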
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dev = dev;
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

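/*
 * Map the whole coherent buffer into userspace and install the common
 * vma ops so that the buffer's refcount tracks the mapping lifetime.
 */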
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

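/*
 * Resolve the pages backing a userspace range: for VM_IO/VM_PFNMAP
 * vmas translate each PFN with follow_pfn(), otherwise pin the pages
 * with get_user_pages().
 */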
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

	sg_free_table(sgt);
	kfree(sgt);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

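/*
 * Acquire a userspace buffer for DMA: pin its pages, build a scatter
 * table, DMA-map it and verify that the resulting mapping is
 * contiguous for at least 'size' bytes.
 */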
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

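/*
 * Pin an attached dmabuf for DMA: ask the exporter for the buffer's
 * scatterlist and check that the mapping it returned is contiguous
 * and large enough.
 */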
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

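/*
 * Attach a dmabuf handed in by userspace to the device; the actual
 * mapping is deferred until vb2_dc_map_dmabuf().
 */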
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

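/*
 * Allocate an allocator context bound to 'dev'; drivers pass the
 * returned cookie to vb2 as alloc_ctx. A minimal usage sketch for
 * illustration only ('priv' and 'q' are hypothetical driver
 * variables, not part of this file):
 *
 *	priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(priv->alloc_ctx))
 *		return PTR_ERR(priv->alloc_ctx);
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	...
 *	vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);
 */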
void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");