/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_conf {
	struct device		*dev;
};

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct vm_area_struct		*vma;

	struct dma_buf_attachment	*db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);

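/*
 * Allocate the pages backing an MMAP buffer: try the highest page order
 * that still fits the remaining size, fall back to lower orders (down to
 * single pages) when allocation fails, and split each compound allocation
 * so that buf->pages[] always holds order-0 pages.
 */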
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	if (WARN_ON(alloc_ctx == NULL))
		return NULL;
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(conf->dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

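/*
 * prepare() is called by the vb2 core before a buffer is handed to the
 * device and finish() after the device is done with it. Because the
 * mapping above is created with DMA_ATTR_SKIP_CPU_SYNC, these two memops
 * are the only points where the CPU cache is synchronised for MMAP and
 * USERPTR buffers; DMABUF buffers are skipped since the exporter takes
 * care of cache maintenance.
 */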
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);
	up_read(&current->mm->mmap_sem);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	down_read(&current->mm->mmap_sem);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	up_read(&current->mm->mmap_sem);
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   &attrs);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	down_read(&current->mm->mmap_sem);
	vb2_put_vma(buf->vma);
	up_read(&current->mm->mmap_sem);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages, -1, PAGE_KERNEL);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

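/*
 * The ops below implement struct dma_buf_ops, so a buffer allocated by
 * this module can be exported as a dma-buf and imported by another device.
 * Each attachment gets its own copy of the scatterlist, because a single
 * sg_table cannot be mapped for several devices at the same time.
 */
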
struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return vb2_dma_sg_vaddr(buf);
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

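/*
 * Drivers normally reach the scatterlist through this cookie, e.g. via the
 * vb2_dma_sg_plane_desc() helper from <media/videobuf2-dma-sg.h>. A minimal
 * sketch (my_hw_program_sg() is a hypothetical driver-specific call):
 *
 *	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 *
 *	if (sgt)
 *		my_hw_program_sg(sgt->sgl, sgt->nents);
 */
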
const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.prepare	= vb2_dma_sg_prepare,
	.finish		= vb2_dma_sg_finish,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

void *vb2_dma_sg_init_ctx(struct device *dev)
{
	struct vb2_dma_sg_conf *conf;

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);

void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);

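/*
 * Typical usage (a minimal sketch; "dev", "my_ctx" and "q" are
 * driver-defined, error handling omitted):
 *
 *	my_ctx = vb2_dma_sg_init_ctx(dev);	(at probe time)
 *	q->mem_ops = &vb2_dma_sg_memops;	(before vb2_queue_init(q))
 *
 * and in the driver's queue_setup() callback:
 *
 *	alloc_ctxs[0] = my_ctx;
 *
 * with vb2_dma_sg_cleanup_ctx(my_ctx) called at remove time.
 */
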
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");