drivers/media/v4l2-core/videobuf2-dma-sg.c
1 /*
2 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
6 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 */
12
13 #include <linux/module.h>
14 #include <linux/mm.h>
15 #include <linux/scatterlist.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19
20 #include <media/videobuf2-core.h>
21 #include <media/videobuf2-memops.h>
22 #include <media/videobuf2-dma-sg.h>
23
24 static int debug;
25 module_param(debug, int, 0644);
26
27 #define dprintk(level, fmt, arg...) \
28 do { \
29 if (debug >= level) \
30 printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
31 } while (0)
32
33 struct vb2_dma_sg_conf {
34 struct device *dev;
35 };
36
37 struct vb2_dma_sg_buf {
38 struct device *dev;
39 void *vaddr;
40 struct page **pages;
41 int offset;
42 enum dma_data_direction dma_dir;
43 struct sg_table sg_table;
44 /*
45 * This will point to sg_table when used with the MMAP or USERPTR
46 * memory model, and to the dma_buf sglist when used with the
47 * DMABUF memory model.
48 */
49 struct sg_table *dma_sgt;
50 size_t size;
51 unsigned int num_pages;
52 atomic_t refcount;
53 struct vb2_vmarea_handler handler;
54 struct vm_area_struct *vma;
55
56 struct dma_buf_attachment *db_attach;
57 };
58
59 static void vb2_dma_sg_put(void *buf_priv);
60
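/*
 * Fill buf->pages with buf->size / PAGE_SIZE individual pages while trying
 * to keep them physically contiguous: allocate the largest order that still
 * fits the remaining size, split it into order-0 pages, and fall back to
 * smaller orders (down to single pages) when a higher-order allocation
 * fails. On failure everything allocated so far is freed and -ENOMEM is
 * returned.
 */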
61 static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
62 gfp_t gfp_flags)
63 {
64 unsigned int last_page = 0;
65 int size = buf->size;
66
67 while (size > 0) {
68 struct page *pages;
69 int order;
70 int i;
71
72 order = get_order(size);
73 /* Don't over-allocate */
74 if ((PAGE_SIZE << order) > size)
75 order--;
76
77 pages = NULL;
78 while (!pages) {
79 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
80 __GFP_NOWARN | gfp_flags, order);
81 if (pages)
82 break;
83
84 if (order == 0) {
85 while (last_page--)
86 __free_page(buf->pages[last_page]);
87 return -ENOMEM;
88 }
89 order--;
90 }
91
92 split_page(pages, order);
93 for (i = 0; i < (1 << order); i++)
94 buf->pages[last_page++] = &pages[i];
95
96 size -= PAGE_SIZE << order;
97 }
98
99 return 0;
100 }
101
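/*
 * MMAP allocation: build a pages[] array with vb2_dma_sg_alloc_compacted(),
 * wrap it in an sg_table and map it for DMA on conf->dev. The CPU cache
 * sync is skipped here (DMA_ATTR_SKIP_CPU_SYNC); it is performed later by
 * the prepare() and finish() memops.
 */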
102 static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
103 enum dma_data_direction dma_dir, gfp_t gfp_flags)
104 {
105 struct vb2_dma_sg_conf *conf = alloc_ctx;
106 struct vb2_dma_sg_buf *buf;
107 struct sg_table *sgt;
108 int ret;
109 int num_pages;
110 DEFINE_DMA_ATTRS(attrs);
111
112 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
113
114 if (WARN_ON(alloc_ctx == NULL))
115 return NULL;
116 buf = kzalloc(sizeof *buf, GFP_KERNEL);
117 if (!buf)
118 return NULL;
119
120 buf->vaddr = NULL;
121 buf->dma_dir = dma_dir;
122 buf->offset = 0;
123 buf->size = size;
124 /* size is already page aligned */
125 buf->num_pages = size >> PAGE_SHIFT;
126 buf->dma_sgt = &buf->sg_table;
127
128 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
129 GFP_KERNEL);
130 if (!buf->pages)
131 goto fail_pages_array_alloc;
132
133 ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
134 if (ret)
135 goto fail_pages_alloc;
136
137 ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
138 buf->num_pages, 0, size, GFP_KERNEL);
139 if (ret)
140 goto fail_table_alloc;
141
142 /* Prevent the device from being released while the buffer is used */
143 buf->dev = get_device(conf->dev);
144
145 sgt = &buf->sg_table;
146 /*
147 * No need to sync to the device, this will happen later when the
148 * prepare() memop is called.
149 */
150 sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
151 buf->dma_dir, &attrs);
152 if (!sgt->nents)
153 goto fail_map;
154
155 buf->handler.refcount = &buf->refcount;
156 buf->handler.put = vb2_dma_sg_put;
157 buf->handler.arg = buf;
158
159 atomic_inc(&buf->refcount);
160
161 dprintk(1, "%s: Allocated buffer of %d pages\n",
162 __func__, buf->num_pages);
163 return buf;
164
165 fail_map:
166 put_device(buf->dev);
167 sg_free_table(buf->dma_sgt);
168 fail_table_alloc:
169 num_pages = buf->num_pages;
170 while (num_pages--)
171 __free_page(buf->pages[num_pages]);
172 fail_pages_alloc:
173 kfree(buf->pages);
174 fail_pages_array_alloc:
175 kfree(buf);
176 return NULL;
177 }
178
179 static void vb2_dma_sg_put(void *buf_priv)
180 {
181 struct vb2_dma_sg_buf *buf = buf_priv;
182 struct sg_table *sgt = &buf->sg_table;
183 int i = buf->num_pages;
184
185 if (atomic_dec_and_test(&buf->refcount)) {
186 DEFINE_DMA_ATTRS(attrs);
187
188 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
189 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
190 buf->num_pages);
191 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
192 buf->dma_dir, &attrs);
193 if (buf->vaddr)
194 vm_unmap_ram(buf->vaddr, buf->num_pages);
195 sg_free_table(buf->dma_sgt);
196 while (--i >= 0)
197 __free_page(buf->pages[i]);
198 kfree(buf->pages);
199 put_device(buf->dev);
200 kfree(buf);
201 }
202 }
203
204 static void vb2_dma_sg_prepare(void *buf_priv)
205 {
206 struct vb2_dma_sg_buf *buf = buf_priv;
207 struct sg_table *sgt = buf->dma_sgt;
208
209 /* DMABUF exporter will flush the cache for us */
210 if (buf->db_attach)
211 return;
212
213 dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
214 }
215
216 static void vb2_dma_sg_finish(void *buf_priv)
217 {
218 struct vb2_dma_sg_buf *buf = buf_priv;
219 struct sg_table *sgt = buf->dma_sgt;
220
221 /* DMABUF exporter will flush the cache for us */
222 if (buf->db_attach)
223 return;
224
225 dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
226 }
227
228 static inline int vma_is_io(struct vm_area_struct *vma)
229 {
230 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
231 }
232
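/*
 * USERPTR support: resolve the user buffer to a pages[] array, either by
 * following PFNs for VM_IO/VM_PFNMAP mappings or by pinning the pages with
 * get_user_pages(), then build and DMA-map an sg_table just like the MMAP
 * path. The originating vma is referenced for the lifetime of the buffer.
 */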
233 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
234 unsigned long size,
235 enum dma_data_direction dma_dir)
236 {
237 struct vb2_dma_sg_conf *conf = alloc_ctx;
238 struct vb2_dma_sg_buf *buf;
239 unsigned long first, last;
240 int num_pages_from_user;
241 struct vm_area_struct *vma;
242 struct sg_table *sgt;
243 DEFINE_DMA_ATTRS(attrs);
244
245 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
246
247 buf = kzalloc(sizeof *buf, GFP_KERNEL);
248 if (!buf)
249 return NULL;
250
251 buf->vaddr = NULL;
252 buf->dev = conf->dev;
253 buf->dma_dir = dma_dir;
254 buf->offset = vaddr & ~PAGE_MASK;
255 buf->size = size;
256 buf->dma_sgt = &buf->sg_table;
257
258 first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
259 last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
260 buf->num_pages = last - first + 1;
261
262 buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
263 GFP_KERNEL);
264 if (!buf->pages)
265 goto userptr_fail_alloc_pages;
266
267 down_read(&current->mm->mmap_sem);
268 vma = find_vma(current->mm, vaddr);
269 if (!vma) {
270 dprintk(1, "no vma for address %lu\n", vaddr);
271 goto userptr_fail_find_vma;
272 }
273
274 if (vma->vm_end < vaddr + size) {
275 dprintk(1, "vma at %lu is too small for %lu bytes\n",
276 vaddr, size);
277 goto userptr_fail_find_vma;
278 }
279
280 buf->vma = vb2_get_vma(vma);
281 if (!buf->vma) {
282 dprintk(1, "failed to copy vma\n");
283 goto userptr_fail_find_vma;
284 }
285
286 if (vma_is_io(buf->vma)) {
287 for (num_pages_from_user = 0;
288 num_pages_from_user < buf->num_pages;
289 ++num_pages_from_user, vaddr += PAGE_SIZE) {
290 unsigned long pfn;
291
292 if (follow_pfn(vma, vaddr, &pfn)) {
293 dprintk(1, "no page for address %lu\n", vaddr);
294 break;
295 }
296 buf->pages[num_pages_from_user] = pfn_to_page(pfn);
297 }
298 } else
299 num_pages_from_user = get_user_pages(current, current->mm,
300 vaddr & PAGE_MASK,
301 buf->num_pages,
302 buf->dma_dir == DMA_FROM_DEVICE,
303 1, /* force */
304 buf->pages,
305 NULL);
306 up_read(&current->mm->mmap_sem);
307
308 if (num_pages_from_user != buf->num_pages)
309 goto userptr_fail_get_user_pages;
310
311 if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
312 buf->num_pages, buf->offset, size, 0))
313 goto userptr_fail_alloc_table_from_pages;
314
315 sgt = &buf->sg_table;
316 /*
317 * No need to sync to the device, this will happen later when the
318 * prepare() memop is called.
319 */
320 sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
321 buf->dma_dir, &attrs);
322 if (!sgt->nents)
323 goto userptr_fail_map;
324
325 return buf;
326
327 userptr_fail_map:
328 sg_free_table(&buf->sg_table);
329 userptr_fail_alloc_table_from_pages:
330 userptr_fail_get_user_pages:
331 dprintk(1, "get_user_pages requested/got: %d/%d]\n",
332 buf->num_pages, num_pages_from_user);
333 if (!vma_is_io(buf->vma))
334 while (--num_pages_from_user >= 0)
335 put_page(buf->pages[num_pages_from_user]);
336 down_read(&current->mm->mmap_sem);
337 vb2_put_vma(buf->vma);
338 userptr_fail_find_vma:
339 up_read(&current->mm->mmap_sem);
340 kfree(buf->pages);
341 userptr_fail_alloc_pages:
342 kfree(buf);
343 return NULL;
344 }
345
346 /*
347 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
348 * be used
349 */
350 static void vb2_dma_sg_put_userptr(void *buf_priv)
351 {
352 struct vb2_dma_sg_buf *buf = buf_priv;
353 struct sg_table *sgt = &buf->sg_table;
354 int i = buf->num_pages;
355 DEFINE_DMA_ATTRS(attrs);
356
357 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
358
359 dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
360 __func__, buf->num_pages);
361 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
362 &attrs);
363 if (buf->vaddr)
364 vm_unmap_ram(buf->vaddr, buf->num_pages);
365 sg_free_table(buf->dma_sgt);
366 while (--i >= 0) {
367 if (buf->dma_dir == DMA_FROM_DEVICE)
368 set_page_dirty_lock(buf->pages[i]);
369 if (!vma_is_io(buf->vma))
370 put_page(buf->pages[i]);
371 }
372 kfree(buf->pages);
373 down_read(&current->mm->mmap_sem);
374 vb2_put_vma(buf->vma);
375 up_read(&current->mm->mmap_sem);
376 kfree(buf);
377 }
378
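/*
 * Return a kernel virtual address for the buffer, creating the mapping
 * lazily: dma_buf_vmap() for imported DMABUF buffers, vm_map_ram() over the
 * pages[] array otherwise. The userptr offset is added so the address points
 * at the start of the user data rather than at the page boundary.
 */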
379 static void *vb2_dma_sg_vaddr(void *buf_priv)
380 {
381 struct vb2_dma_sg_buf *buf = buf_priv;
382
383 BUG_ON(!buf);
384
385 if (!buf->vaddr) {
386 if (buf->db_attach)
387 buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
388 else
389 buf->vaddr = vm_map_ram(buf->pages,
390 buf->num_pages, -1, PAGE_KERNEL);
391 }
392
393 /* add offset in case userptr is not page-aligned */
394 return buf->vaddr ? buf->vaddr + buf->offset : NULL;
395 }
396
397 static unsigned int vb2_dma_sg_num_users(void *buf_priv)
398 {
399 struct vb2_dma_sg_buf *buf = buf_priv;
400
401 return atomic_read(&buf->refcount);
402 }
403
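/*
 * mmap() support: insert each allocated page into the user vma with
 * vm_insert_page() and hook up vb2_common_vm_ops so the vma's open()/close()
 * keep the buffer refcount in sync with userspace mappings.
 */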
404 static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
405 {
406 struct vb2_dma_sg_buf *buf = buf_priv;
407 unsigned long uaddr = vma->vm_start;
408 unsigned long usize = vma->vm_end - vma->vm_start;
409 int i = 0;
410
411 if (!buf) {
412 printk(KERN_ERR "No memory to map\n");
413 return -EINVAL;
414 }
415
416 do {
417 int ret;
418
419 ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
420 if (ret) {
421 printk(KERN_ERR "Remapping memory, error: %d\n", ret);
422 return ret;
423 }
424
425 uaddr += PAGE_SIZE;
426 usize -= PAGE_SIZE;
427 } while (usize > 0);
428
430 /*
431 * Use common vm_area operations to track buffer refcount.
432 */
433 vma->vm_private_data = &buf->handler;
434 vma->vm_ops = &vb2_common_vm_ops;
435
436 vma->vm_ops->open(vma);
437
438 return 0;
439 }
440
441 /*********************************************/
442 /* DMABUF ops for exporters */
443 /*********************************************/
444
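/*
 * Exporter side: vb2_dma_sg_get_dmabuf() wraps the buffer in a dma_buf whose
 * ops below hand out a private copy of the scatter list per attachment, so
 * several importers can attach to and map the same buffer independently.
 */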
445 struct vb2_dma_sg_attachment {
446 struct sg_table sgt;
447 enum dma_data_direction dma_dir;
448 };
449
450 static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
451 struct dma_buf_attachment *dbuf_attach)
452 {
453 struct vb2_dma_sg_attachment *attach;
454 unsigned int i;
455 struct scatterlist *rd, *wr;
456 struct sg_table *sgt;
457 struct vb2_dma_sg_buf *buf = dbuf->priv;
458 int ret;
459
460 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
461 if (!attach)
462 return -ENOMEM;
463
464 sgt = &attach->sgt;
465 /* Copy the buf->dma_sgt scatter list to the attachment, as we can't
466 * map the same scatter list to multiple attachments at the same time.
467 */
468 ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
469 if (ret) {
470 kfree(attach);
471 return -ENOMEM;
472 }
473
474 rd = buf->dma_sgt->sgl;
475 wr = sgt->sgl;
476 for (i = 0; i < sgt->orig_nents; ++i) {
477 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
478 rd = sg_next(rd);
479 wr = sg_next(wr);
480 }
481
482 attach->dma_dir = DMA_NONE;
483 dbuf_attach->priv = attach;
484
485 return 0;
486 }
487
488 static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
489 struct dma_buf_attachment *db_attach)
490 {
491 struct vb2_dma_sg_attachment *attach = db_attach->priv;
492 struct sg_table *sgt;
493
494 if (!attach)
495 return;
496
497 sgt = &attach->sgt;
498
499 /* release the scatterlist cache */
500 if (attach->dma_dir != DMA_NONE)
501 dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
502 attach->dma_dir);
503 sg_free_table(sgt);
504 kfree(attach);
505 db_attach->priv = NULL;
506 }
507
508 static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
509 struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
510 {
511 struct vb2_dma_sg_attachment *attach = db_attach->priv;
512 /* stealing dmabuf mutex to serialize map/unmap operations */
513 struct mutex *lock = &db_attach->dmabuf->lock;
514 struct sg_table *sgt;
515
516 mutex_lock(lock);
517
518 sgt = &attach->sgt;
519 /* return previously mapped sg table */
520 if (attach->dma_dir == dma_dir) {
521 mutex_unlock(lock);
522 return sgt;
523 }
524
525 /* release any previous cache */
526 if (attach->dma_dir != DMA_NONE) {
527 dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
528 attach->dma_dir);
529 attach->dma_dir = DMA_NONE;
530 }
531
532 /* mapping to the client with new direction */
533 sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
534 dma_dir);
535 if (!sgt->nents) {
536 pr_err("failed to map scatterlist\n");
537 mutex_unlock(lock);
538 return ERR_PTR(-EIO);
539 }
540
541 attach->dma_dir = dma_dir;
542
543 mutex_unlock(lock);
544
545 return sgt;
546 }
547
548 static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
549 struct sg_table *sgt, enum dma_data_direction dma_dir)
550 {
551 /* nothing to be done here */
552 }
553
554 static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
555 {
556 /* drop reference obtained in vb2_dma_sg_get_dmabuf */
557 vb2_dma_sg_put(dbuf->priv);
558 }
559
560 static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
561 {
562 struct vb2_dma_sg_buf *buf = dbuf->priv;
563
564 return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
565 }
566
567 static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
568 {
569 struct vb2_dma_sg_buf *buf = dbuf->priv;
570
571 return vb2_dma_sg_vaddr(buf);
572 }
573
574 static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
575 struct vm_area_struct *vma)
576 {
577 return vb2_dma_sg_mmap(dbuf->priv, vma);
578 }
579
580 static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
581 .attach = vb2_dma_sg_dmabuf_ops_attach,
582 .detach = vb2_dma_sg_dmabuf_ops_detach,
583 .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
584 .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
585 .kmap = vb2_dma_sg_dmabuf_ops_kmap,
586 .kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
587 .vmap = vb2_dma_sg_dmabuf_ops_vmap,
588 .mmap = vb2_dma_sg_dmabuf_ops_mmap,
589 .release = vb2_dma_sg_dmabuf_ops_release,
590 };
591
592 static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
593 {
594 struct vb2_dma_sg_buf *buf = buf_priv;
595 struct dma_buf *dbuf;
596 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
597
598 exp_info.ops = &vb2_dma_sg_dmabuf_ops;
599 exp_info.size = buf->size;
600 exp_info.flags = flags;
601 exp_info.priv = buf;
602
603 if (WARN_ON(!buf->dma_sgt))
604 return NULL;
605
606 dbuf = dma_buf_export(&exp_info);
607 if (IS_ERR(dbuf))
608 return NULL;
609
610 /* dmabuf keeps reference to vb2 buffer */
611 atomic_inc(&buf->refcount);
612
613 return dbuf;
614 }
615
616 /*********************************************/
617 /* callbacks for DMABUF buffers */
618 /*********************************************/
619
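/*
 * Importer side: vb2 attaches to a foreign dma_buf with attach_dmabuf(),
 * map_dmabuf()/unmap_dmabuf() bracket each use of the buffer by getting and
 * releasing the exporter's scatter list, and detach_dmabuf() drops the
 * attachment again. While mapped, buf->dma_sgt points at the imported table.
 */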
620 static int vb2_dma_sg_map_dmabuf(void *mem_priv)
621 {
622 struct vb2_dma_sg_buf *buf = mem_priv;
623 struct sg_table *sgt;
624
625 if (WARN_ON(!buf->db_attach)) {
626 pr_err("trying to pin a non attached buffer\n");
627 return -EINVAL;
628 }
629
630 if (WARN_ON(buf->dma_sgt)) {
631 pr_err("dmabuf buffer is already pinned\n");
632 return 0;
633 }
634
635 /* get the associated scatterlist for this buffer */
636 sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
637 if (IS_ERR(sgt)) {
638 pr_err("Error getting dmabuf scatterlist\n");
639 return -EINVAL;
640 }
641
642 buf->dma_sgt = sgt;
643 buf->vaddr = NULL;
644
645 return 0;
646 }
647
648 static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
649 {
650 struct vb2_dma_sg_buf *buf = mem_priv;
651 struct sg_table *sgt = buf->dma_sgt;
652
653 if (WARN_ON(!buf->db_attach)) {
654 pr_err("trying to unpin a not attached buffer\n");
655 return;
656 }
657
658 if (WARN_ON(!sgt)) {
659 pr_err("dmabuf buffer is already unpinned\n");
660 return;
661 }
662
663 if (buf->vaddr) {
664 dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
665 buf->vaddr = NULL;
666 }
667 dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
668
669 buf->dma_sgt = NULL;
670 }
671
672 static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
673 {
674 struct vb2_dma_sg_buf *buf = mem_priv;
675
676 /* if vb2 works correctly you should never detach mapped buffer */
677 if (WARN_ON(buf->dma_sgt))
678 vb2_dma_sg_unmap_dmabuf(buf);
679
680 /* detach this attachment */
681 dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
682 kfree(buf);
683 }
684
685 static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
686 unsigned long size, enum dma_data_direction dma_dir)
687 {
688 struct vb2_dma_sg_conf *conf = alloc_ctx;
689 struct vb2_dma_sg_buf *buf;
690 struct dma_buf_attachment *dba;
691
692 if (dbuf->size < size)
693 return ERR_PTR(-EFAULT);
694
695 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
696 if (!buf)
697 return ERR_PTR(-ENOMEM);
698
699 buf->dev = conf->dev;
700 /* create attachment for the dmabuf with the user device */
701 dba = dma_buf_attach(dbuf, buf->dev);
702 if (IS_ERR(dba)) {
703 pr_err("failed to attach dmabuf\n");
704 kfree(buf);
705 return dba;
706 }
707
708 buf->dma_dir = dma_dir;
709 buf->size = size;
710 buf->db_attach = dba;
711
712 return buf;
713 }
714
715 static void *vb2_dma_sg_cookie(void *buf_priv)
716 {
717 struct vb2_dma_sg_buf *buf = buf_priv;
718
719 return buf->dma_sgt;
720 }
721
722 const struct vb2_mem_ops vb2_dma_sg_memops = {
723 .alloc = vb2_dma_sg_alloc,
724 .put = vb2_dma_sg_put,
725 .get_userptr = vb2_dma_sg_get_userptr,
726 .put_userptr = vb2_dma_sg_put_userptr,
727 .prepare = vb2_dma_sg_prepare,
728 .finish = vb2_dma_sg_finish,
729 .vaddr = vb2_dma_sg_vaddr,
730 .mmap = vb2_dma_sg_mmap,
731 .num_users = vb2_dma_sg_num_users,
732 .get_dmabuf = vb2_dma_sg_get_dmabuf,
733 .map_dmabuf = vb2_dma_sg_map_dmabuf,
734 .unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
735 .attach_dmabuf = vb2_dma_sg_attach_dmabuf,
736 .detach_dmabuf = vb2_dma_sg_detach_dmabuf,
737 .cookie = vb2_dma_sg_cookie,
738 };
739 EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
740
741 void *vb2_dma_sg_init_ctx(struct device *dev)
742 {
743 struct vb2_dma_sg_conf *conf;
744
745 conf = kzalloc(sizeof(*conf), GFP_KERNEL);
746 if (!conf)
747 return ERR_PTR(-ENOMEM);
748
749 conf->dev = dev;
750
751 return conf;
752 }
753 EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);
754
755 void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
756 {
757 if (!IS_ERR_OR_NULL(alloc_ctx))
758 kfree(alloc_ctx);
759 }
760 EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);
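/*
 * Minimal usage sketch (not part of this file): a driver that wants vb2 to
 * back its buffers with this allocator typically creates an allocation
 * context at probe time and plugs vb2_dma_sg_memops into its vb2_queue.
 * Names such as my_dev, my_queue_setup, image_size, alloc_ctx and queue
 * below are hypothetical, a platform device is assumed, and the
 * queue_setup() prototype shown is the one used by vb2 in this kernel
 * generation.
 *
 *	static int my_queue_setup(struct vb2_queue *vq,
 *				  const struct v4l2_format *fmt,
 *				  unsigned int *nbuffers, unsigned int *nplanes,
 *				  unsigned int sizes[], void *alloc_ctxs[])
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(vq);
 *
 *		*nplanes = 1;
 *		sizes[0] = dev->image_size;
 *		alloc_ctxs[0] = dev->alloc_ctx;	/* from vb2_dma_sg_init_ctx() */
 *		return 0;
 *	}
 *
 *	// in probe():
 *	dev->alloc_ctx = vb2_dma_sg_init_ctx(&pdev->dev);
 *	if (IS_ERR(dev->alloc_ctx))
 *		return PTR_ERR(dev->alloc_ctx);
 *	dev->queue.mem_ops = &vb2_dma_sg_memops;
 *
 *	// in remove():
 *	vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
 */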
761
762 MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
763 MODULE_AUTHOR("Andrzej Pietrasiewicz");
764 MODULE_LICENSE("GPL");