[media] v4l: vb2: add prepare/finish callbacks to allocators
drivers/media/v4l2-core/videobuf2-dma-contig.c
/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device			*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;

	/* USERPTR related */
	struct vm_area_struct		*vma;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

/* Return the length of the initial physically contiguous run of the
 * mapped scatterlist; if it covers the whole buffer, the mapping is
 * usable as a single DMA-contiguous region.
 */
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

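/*
 * The commit title above adds prepare/finish callbacks to the allocators,
 * but they are not visible in this snapshot. What follows is a hedged
 * sketch, not necessarily the committed implementation: sync the mapped
 * scatterlist for the device before DMA and back to the CPU afterwards.
 */
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* MMAP buffers come from dma_alloc_coherent() and have no dma_sgt */
	if (!sgt)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (!sgt)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
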
/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dev = dev;
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
				  &vb2_common_vm_ops, &buf->handler);
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

/* VM_IO/VM_PFNMAP areas have no struct pages to pin; resolve PFNs instead */
static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			/* release the pages that were pinned */
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

	sg_free_table(sgt);
	kfree(sgt);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.num_users	= vb2_dc_num_users,
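	/* hedged wiring for the prepare/finish sketch above */
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,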
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
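
/*
 * Hedged usage sketch (not part of this file): a driver typically creates
 * one allocator context per device at probe time and points its vb2_queue
 * at these memops; "dev->alloc_ctx" below is a hypothetical driver field.
 *
 *	dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(dev->alloc_ctx))
 *		return PTR_ERR(dev->alloc_ctx);
 *
 *	q->mem_ops = &vb2_dma_contig_memops;
 *
 * and at remove time:
 *
 *	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
 */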