/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

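/*
 * Compute the size of the leading bus-address-contiguous run of a mapped
 * scatterlist. Illustrative example (made-up addresses): two 4 KiB entries
 * mapped at 0x1000 and 0x2000 accumulate to 8 KiB; a third entry at 0x4000
 * breaks the run, so only the first 8 KiB count as contiguous.
 */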
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
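
/*
 * The cookie is a pointer to the buffer's dma_addr_t; drivers normally
 * dereference it through the vb2_dma_contig_plane_dma_addr() helper from
 * <media/videobuf2-dma-contig.h>, e.g. (illustrative):
 *
 *	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 */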

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

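/*
 * prepare/finish bracket each hardware transaction: prepare hands the
 * buffer's cache lines to the device before DMA, finish gives them back
 * to the CPU afterwards. Both are no-ops for coherent MMAP allocations
 * (no scatterlist) and for imported DMABUFs (the exporter syncs).
 */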
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
	 * map the whole buffer.
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dir == dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
		attach->dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dir = dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

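/*
 * Export flow, for reference: the vb2 core calls vb2_dc_get_dmabuf() when
 * userspace issues VIDIOC_EXPBUF. The base scatterlist is built lazily on
 * first export, and the extra refcount keeps the buffer alive until the
 * last dma_buf reference is dropped (vb2_dc_dmabuf_ops_release).
 */
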
/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

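/*
 * Used when releasing a USERPTR buffer: pages the device may have written
 * must be marked dirty before being unpinned, or the kernel could discard
 * the data instead of writing it back.
 */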
static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

	sg_free_table(sgt);
	kfree(sgt);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

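/*
 * USERPTR setup, step by step: validate alignment and size, pin the user
 * pages (or resolve PFNs for VM_IO/VM_PFNMAP areas), build a scatterlist
 * from them, map it for DMA, and finally verify that the resulting bus
 * addresses form one contiguous run big enough for the buffer. Each
 * failure label below unwinds exactly the steps taken before it.
 */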
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

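/*
 * Importer lifecycle, roughly: the vb2 core creates the attachment when a
 * DMABUF fd is first queued, maps it around actual hardware access, and
 * detaches when the buffer is released. Only the mapped phase pins the
 * exporter's memory.
 */
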
/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

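/*
 * Minimal driver-side usage sketch (illustrative; "my_pdev" and the queue
 * setup are assumptions about a typical vb2 driver, not part of this file):
 *
 *	void *alloc_ctx = vb2_dma_contig_init_ctx(&my_pdev->dev);
 *
 *	if (IS_ERR(alloc_ctx))
 *		return PTR_ERR(alloc_ctx);
 *	q->mem_ops = &vb2_dma_contig_memops;
 *
 * The context is then handed back from the driver's queue_setup() callback
 * through alloc_ctxs[0], and released with vb2_dma_contig_cleanup_ctx() on
 * driver removal.
 */
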
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");