/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

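/*
 * Per-buffer private state. A vb2_dc_buf is created by one of the three
 * allocator entry points (alloc, get_userptr, attach_dmabuf); the fields
 * below are grouped by the buffer type that uses them.
 */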
struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

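/* Invoke @cb on every struct page backing the (unmapped) scatterlist. */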
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

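/*
 * Return the size of the run of DMA-contiguous chunks at the start of the
 * mapped scatterlist; if this is smaller than the buffer size, the memory
 * cannot be used by a device that requires contiguous DMA addresses.
 */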
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

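/*
 * For DMABUF-imported buffers the kernel mapping is created lazily on the
 * first vaddr request; MMAP buffers already have one from
 * dma_alloc_coherent().
 */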
static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

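/*
 * Cache synchronization before and after DMA. Only USERPTR buffers carry a
 * dma_sgt without a dmabuf attachment here: MMAP memory is coherent, and
 * for imported DMABUFs cache maintenance is the exporter's job.
 */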
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

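/*
 * Drop one reference; the last put frees the exported sg table (if any)
 * and the coherent memory, releases the device and frees the buffer.
 */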
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

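/*
 * mmap() backend for MMAP buffers: remaps the coherent allocation and hooks
 * up vb2_common_vm_ops so that the vma open/close callbacks keep the buffer
 * refcount in sync with userspace mappings.
 */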
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};

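/*
 * Each attachment gets its own copy of the exporter's scatterlist, since one
 * sg table cannot be DMA-mapped for several importers at once. The copy
 * starts out unmapped (dir == DMA_NONE).
 */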
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

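/*
 * Map the attachment's scatterlist for the importer. A mapping in the
 * requested direction is cached in the attachment and reused; changing
 * direction unmaps and remaps.
 */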
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dir == dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
		attach->dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dir = dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

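/*
 * Build the scatterlist describing the coherent allocation; it is kept in
 * buf->sgt_base and copied for every exporter attachment.
 */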
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags, NULL);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

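/*
 * For VM_IO/VM_PFNMAP vmas there may be no struct pages to pin; instead,
 * walk the range with follow_pfn() and succeed only if the PFNs are
 * physically contiguous, returning the first PFN in @res.
 */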
static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
	struct vm_area_struct *vma, unsigned long *res)
{
	unsigned long pfn, start_pfn, prev_pfn;
	unsigned int i;
	int ret;

	if (!vma_is_io(vma))
		return -EFAULT;

	ret = follow_pfn(vma, start, &pfn);
	if (ret)
		return ret;

	start_pfn = pfn;
	start += PAGE_SIZE;

	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
		prev_pfn = pfn;
		ret = follow_pfn(vma, start, &pfn);

		if (ret) {
			pr_err("no page for address %lu\n", start);
			return ret;
		}
		if (pfn != prev_pfn + 1)
			return -EINVAL;
	}

	*res = start_pfn;
	return 0;
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			/* check ret first: pfn is undefined on failure */
			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}

			if (!pfn_valid(pfn))
				return -EINVAL;

			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

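/*
 * Undo vb2_dc_get_userptr(): unmap the scatterlist, mark pinned pages dirty
 * and release them (unless the vma was VM_IO/VM_PFNMAP, where no pages were
 * pinned), then drop the vma reference.
 */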
static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (sgt) {
		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
		if (!vma_is_io(buf->vma))
			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_put_vma(buf->vma);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there is no struct page available, so
 * all that can be done to support such 'pages' is to try to convert the pfn
 * to a dma address, or as a last resort assume that
 * dma address == physical address (as earlier versions of
 * videobuf2-dma-contig did).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

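/*
 * Import a userspace buffer: pin its pages (or resolve a contiguous PFN
 * range for VM_IO/VM_PFNMAP vmas), build a scatterlist, DMA-map it and
 * verify that the mapping is contiguous for the whole requested size.
 */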
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		unsigned long pfn;
		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
			buf->size = size;
			kfree(pages);
			return buf;
		}

		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

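/*
 * Pin an attached dmabuf for DMA: ask the exporter for the scatterlist and
 * verify that it is contiguous and large enough for the buffer.
 */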
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
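
/*
 * Typical driver usage (a minimal sketch, not taken from this file; error
 * handling is trimmed and "my_dev" stands in for the driver's struct device):
 *
 *	void *alloc_ctx = vb2_dma_contig_init_ctx(my_dev);
 *
 *	queue->mem_ops = &vb2_dma_contig_memops;
 *	// hand alloc_ctx to the vb2 queue from the driver's queue_setup
 *	// callback, then tear it down on driver removal:
 *
 *	vb2_dma_contig_cleanup_ctx(alloc_ctx);
 */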

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");