[media] vim2m: support expbuf
[deliverable/linux.git] / drivers/media/v4l2-core/videobuf2-dma-contig.c
/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
        struct device *dev;
};

struct vb2_dc_buf {
        struct device *dev;
        void *vaddr;
        unsigned long size;
        dma_addr_t dma_addr;
        enum dma_data_direction dma_dir;
        struct sg_table *dma_sgt;

        /* MMAP related */
        struct vb2_vmarea_handler handler;
        atomic_t refcount;
        struct sg_table *sgt_base;

        /* USERPTR related */
        struct vm_area_struct *vma;

        /* DMABUF related */
        struct dma_buf_attachment *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
        void (*cb)(struct page *pg))
{
        struct scatterlist *s;
        unsigned int i;

        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                struct page *page = sg_page(s);
                unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
                        >> PAGE_SHIFT;
                unsigned int j;

                for (j = 0; j < n_pages; ++j, ++page)
                        cb(page);
        }
}

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}
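
/*
 * Illustrative example (not from the original file): if dma_map_sg()
 * produced two entries, 0x10000000/4096 followed by 0x10001000/4096,
 * the walk above merges them and reports 8192 contiguous bytes; a gap
 * before a third entry at, say, 0x10003000 stops the accumulation, so
 * only the leading contiguous run counts.
 */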

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!buf->vaddr && buf->db_attach)
                buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
        enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
                GFP_KERNEL | gfp_flags);
        if (!buf->vaddr) {
                dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
         * map whole buffer
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
                buf->dma_addr, buf->size);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}
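
/*
 * Sketch of the userspace counterpart (illustrative, not from this
 * file): after VIDIOC_QUERYBUF fills in buf.m.offset, an application
 * would map the buffer with something like
 *
 *      ptr = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
 *                 MAP_SHARED, video_fd, buf.m.offset);
 *
 * The V4L2 core looks up the vb2 buffer from that offset and calls
 * vb2_dc_mmap() above, which is why vm_pgoff is cleared before
 * dma_mmap_coherent() is invoked.
 */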

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;
        int ret;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
        if (ret <= 0) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .kmap = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
                buf->size);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags, NULL);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}
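
/*
 * Illustrative only: with VIDIOC_EXPBUF wired up (which is what the
 * "[media] vim2m: support expbuf" commit this page belongs to enables
 * for vim2m), userspace can export an MMAP buffer as a dmabuf file
 * descriptor, roughly:
 *
 *      struct v4l2_exportbuffer expbuf = {
 *              .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .index = 0,
 *              .flags = O_CLOEXEC,
 *      };
 *      ioctl(video_fd, VIDIOC_EXPBUF, &expbuf);
 *
 * On success, expbuf.fd refers to the dma_buf created by
 * vb2_dc_get_dmabuf() above; the vb2 core routes the ioctl to the
 * .get_dmabuf memop.
 */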

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
        struct vm_area_struct *vma, unsigned long *res)
{
        unsigned long pfn, start_pfn, prev_pfn;
        unsigned int i;
        int ret;

        if (!vma_is_io(vma))
                return -EFAULT;

        ret = follow_pfn(vma, start, &pfn);
        if (ret)
                return ret;

        start_pfn = pfn;
        start += PAGE_SIZE;

        for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
                prev_pfn = pfn;
                ret = follow_pfn(vma, start, &pfn);

                if (ret) {
                        pr_err("no page for address %lu\n", start);
                        return ret;
                }
                if (pfn != prev_pfn + 1)
                        return -EINVAL;
        }

        *res = start_pfn;
        return 0;
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
        int n_pages, struct vm_area_struct *vma,
        enum dma_data_direction dma_dir)
{
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);

                        /* pfn is only valid if follow_pfn() succeeded */
                        if (ret) {
                                pr_err("no page for address %lu\n", start);
                                return ret;
                        }
                        if (!pfn_valid(pfn))
                                return -EINVAL;

                        pages[i] = pfn_to_page(pfn);
                }
        } else {
                int n;

                n = get_user_pages(current, current->mm, start & PAGE_MASK,
                        n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
                /* negative error means that no page was pinned */
                n = max(n, 0);
                if (n != n_pages) {
                        pr_err("got only %d of %d user pages\n", n, n_pages);
                        while (n)
                                put_page(pages[--n]);
                        return -EFAULT;
                }
        }

        return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
        set_page_dirty_lock(page);
        put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (sgt) {
                dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
                if (!vma_is_io(buf->vma))
                        vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

                sg_free_table(sgt);
                kfree(sgt);
        }
        vb2_put_vma(buf->vma);
        kfree(buf);
}

/*
 * For some kinds of reserved memory there may be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a dma address, or as a last resort assume that
 * dma address == physical address (as earlier versions of
 * videobuf2-dma-contig did).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        /* really, we cannot do anything better at this point */
        return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
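
/*
 * Illustrative only: on a platform where none of the conversion macros
 * above exist, pfn 0x10000 with 4 KiB pages falls through to the last
 * case and becomes dma_addr 0x10000000 (0x10000 << 12), i.e. the code
 * simply assumes the bus sees the same addresses as the CPU.
 */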

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        unsigned long start;
        unsigned long end;
        unsigned long offset;
        struct page **pages;
        int n_pages;
        int ret = 0;
        struct vm_area_struct *vma;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = dma_dir;

        start = vaddr & PAGE_MASK;
        offset = vaddr & ~PAGE_MASK;
        end = PAGE_ALIGN(vaddr + size);
        n_pages = (end - start) >> PAGE_SHIFT;

        pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                pr_err("failed to allocate pages table\n");
                goto fail_buf;
        }

        /* current->mm->mmap_sem is taken by videobuf2 core */
        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                pr_err("no vma for address %lu\n", vaddr);
                ret = -EFAULT;
                goto fail_pages;
        }

        if (vma->vm_end < vaddr + size) {
                pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
                ret = -EFAULT;
                goto fail_pages;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                pr_err("failed to copy vma\n");
                ret = -ENOMEM;
                goto fail_pages;
        }

        /* extract page list from userspace mapping */
        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
        if (ret) {
                unsigned long pfn;
                if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
                        buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
                        buf->size = size;
                        kfree(pages);
                        return buf;
                }

                pr_err("failed to get user pages\n");
                goto fail_vma;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_get_user_pages;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /* pages are no longer needed */
        kfree(pages);
        pages = NULL;

        sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
                buf->dma_dir);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->size = size;
        buf->dma_sgt = sgt;

        return buf;

fail_map_sg:
        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, put_page);
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_get_user_pages:
        if (pages && !vma_is_io(buf->vma))
                while (n_pages)
                        put_page(pages[--n_pages]);

fail_vma:
        vb2_put_vma(buf->vma);

fail_pages:
        kfree(pages); /* kfree is NULL-proof */

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}
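
/*
 * Illustrative only: when userspace queues a V4L2_MEMORY_DMABUF buffer,
 * the vb2 core roughly calls .attach_dmabuf once with the fd's dma_buf,
 * then .map_dmabuf/.unmap_dmabuf around use of the buffer, and finally
 * .detach_dmabuf when the queue is torn down - i.e. the four callbacks
 * above in that order.
 */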

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc = vb2_dc_alloc,
        .put = vb2_dc_put,
        .get_dmabuf = vb2_dc_get_dmabuf,
        .cookie = vb2_dc_cookie,
        .vaddr = vb2_dc_vaddr,
        .mmap = vb2_dc_mmap,
        .get_userptr = vb2_dc_get_userptr,
        .put_userptr = vb2_dc_put_userptr,
        .prepare = vb2_dc_prepare,
        .finish = vb2_dc_finish,
        .map_dmabuf = vb2_dc_map_dmabuf,
        .unmap_dmabuf = vb2_dc_unmap_dmabuf,
        .attach_dmabuf = vb2_dc_attach_dmabuf,
        .detach_dmabuf = vb2_dc_detach_dmabuf,
        .num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        if (!IS_ERR_OR_NULL(alloc_ctx))
                kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
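
/*
 * Sketch of driver-side usage (illustrative; "my_dev" and "q" are
 * hypothetical names, not from this file):
 *
 *      void *alloc_ctx = vb2_dma_contig_init_ctx(&my_dev->dev);
 *      if (IS_ERR(alloc_ctx))
 *              return PTR_ERR(alloc_ctx);
 *      q->mem_ops = &vb2_dma_contig_memops;
 *      ...
 *      vb2_dma_contig_cleanup_ctx(alloc_ctx);
 *
 * The ctx pointer is handed back to this allocator as the alloc_ctx
 * argument of vb2_dc_alloc(), vb2_dc_get_userptr() and
 * vb2_dc_attach_dmabuf().
 */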

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");