videobuf-dma-contig: helper functions for physically contiguous capture buffers
[deliverable/linux.git] / drivers / media / video / videobuf-dma-contig.c
CommitLineData
2cc45cf2
MD
1/*
2 * helper functions for physically contiguous capture buffers
3 *
4 * The functions support hardware lacking scatter gather support
5 * (i.e. the buffers must be linear in physical memory)
6 *
7 * Copyright (c) 2008 Magnus Damm
8 *
9 * Based on videobuf-vmalloc.c,
10 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2
15 */
16
17#include <linux/init.h>
18#include <linux/module.h>
f19ad390 19#include <linux/mm.h>
720b17e7 20#include <linux/pagemap.h>
2cc45cf2 21#include <linux/dma-mapping.h>
f39c1ab3 22#include <linux/sched.h>
2cc45cf2
MD
23#include <media/videobuf-dma-contig.h>
24
/* Per-buffer private state for the dma-contig memory handler. */
struct videobuf_dma_contig_memory {
	u32 magic;		/* MAGIC_DC_MEM, checked before every use */
	void *vaddr;		/* kernel virtual address, NULL when not allocated */
	dma_addr_t dma_handle;	/* bus address handed to the device */
	unsigned long size;	/* buffer size, page aligned */
	int is_userptr;		/* non-zero when backed by user space memory */
};
#define MAGIC_DC_MEM 0x0733ac61

/*
 * Sanity check of the per-buffer magic value; BUG() on mismatch.
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and stays safe inside unbraced if/else bodies.
 */
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			pr_err("magic mismatch: %x expected %x\n",	\
			       (is), (should));				\
			BUG();						\
		}							\
	} while (0)
/*
 * mmap open callback: take another reference on the mapping so it
 * survives VMA duplication (fork, split).
 */
static void
videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}
50
/*
 * mmap close callback: drop a reference on the mapping.  On the last
 * close, cancel any active streaming and release the DMA memory of
 * every buffer that was attached to this mapping.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
		mutex_lock(&q->vb_lock);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				dma_free_coherent(q->dev, mem->size,
						  mem->vaddr, mem->dma_handle);
				mem->vaddr = NULL;
			}

			/* detach the buffer from the now-dead mapping */
			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		mutex_unlock(&q->vb_lock);
	}
}
108
/* VMA callbacks installed on mappings created by __videobuf_mmap_mapper() */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
};
113
720b17e7
MD
/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer.  Nothing is freed here:
 * user-pointer buffers were never allocated by this module, so only the
 * bookkeeping fields are cleared.
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	mem->is_userptr = 0;
	mem->dma_handle = 0;
	mem->size = 0;
}
126
127/**
128 * videobuf_dma_contig_user_get() - setup user space memory pointer
129 * @mem: per-buffer private videobuf-dma-contig data
130 * @vb: video buffer to map
131 *
132 * This function validates and sets up a pointer to user space memory.
133 * Only physically contiguous pfn-mapped memory is accepted.
134 *
135 * Returns 0 if successful.
136 */
137static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
138 struct videobuf_buffer *vb)
139{
140 struct mm_struct *mm = current->mm;
141 struct vm_area_struct *vma;
142 unsigned long prev_pfn, this_pfn;
143 unsigned long pages_done, user_address;
31bedfa5 144 unsigned int offset;
720b17e7
MD
145 int ret;
146
31bedfa5
MK
147 offset = vb->baddr & ~PAGE_MASK;
148 mem->size = PAGE_ALIGN(vb->size + offset);
720b17e7
MD
149 mem->is_userptr = 0;
150 ret = -EINVAL;
151
152 down_read(&mm->mmap_sem);
153
154 vma = find_vma(mm, vb->baddr);
155 if (!vma)
156 goto out_up;
157
158 if ((vb->baddr + mem->size) > vma->vm_end)
159 goto out_up;
160
161 pages_done = 0;
162 prev_pfn = 0; /* kill warning */
163 user_address = vb->baddr;
164
165 while (pages_done < (mem->size >> PAGE_SHIFT)) {
166 ret = follow_pfn(vma, user_address, &this_pfn);
167 if (ret)
168 break;
169
170 if (pages_done == 0)
31bedfa5 171 mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
720b17e7
MD
172 else if (this_pfn != (prev_pfn + 1))
173 ret = -EFAULT;
174
175 if (ret)
176 break;
177
178 prev_pfn = this_pfn;
179 user_address += PAGE_SIZE;
180 pages_done++;
181 }
182
183 if (!ret)
184 mem->is_userptr = 1;
185
186 out_up:
187 up_read(&current->mm->mmap_sem);
188
189 return ret;
190}
191
2cc45cf2
MD
192static void *__videobuf_alloc(size_t size)
193{
194 struct videobuf_dma_contig_memory *mem;
195 struct videobuf_buffer *vb;
196
197 vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
198 if (vb) {
199 mem = vb->priv = ((char *)vb) + size;
200 mem->magic = MAGIC_DC_MEM;
201 }
202
203 return vb;
204}
205
/* Return the buffer's kernel virtual address (NULL if not allocated). */
static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}
215
/*
 * Prepare a buffer's backing memory before I/O starts.
 *
 * MMAP buffers must already have been allocated by
 * __videobuf_mmap_mapper().  USERPTR buffers either validate the user
 * pointer (vb->baddr set) or allocate coherent memory for the read()
 * method.  Overlay and unknown memory types are rejected.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		mem->size = PAGE_ALIGN(vb->size);
		mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
						&mem->dma_handle, GFP_KERNEL);
		if (!mem->vaddr) {
			dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
				mem->size);
			return -ENOMEM;
		}

		dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
			mem->vaddr, mem->size);
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
264
2cc45cf2
MD
265static int __videobuf_mmap_free(struct videobuf_queue *q)
266{
267 unsigned int i;
268
269 dev_dbg(q->dev, "%s\n", __func__);
270 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
271 if (q->bufs[i] && q->bufs[i]->map)
272 return -EBUSY;
273 }
274
275 return 0;
276}
277
278static int __videobuf_mmap_mapper(struct videobuf_queue *q,
279 struct vm_area_struct *vma)
280{
281 struct videobuf_dma_contig_memory *mem;
282 struct videobuf_mapping *map;
283 unsigned int first;
284 int retval;
285 unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;
286
287 dev_dbg(q->dev, "%s\n", __func__);
288 if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
289 return -EINVAL;
290
291 /* look for first buffer to map */
292 for (first = 0; first < VIDEO_MAX_FRAME; first++) {
293 if (!q->bufs[first])
294 continue;
295
296 if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
297 continue;
298 if (q->bufs[first]->boff == offset)
299 break;
300 }
301 if (VIDEO_MAX_FRAME == first) {
302 dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
303 offset);
304 return -EINVAL;
305 }
306
307 /* create mapping + update buffer list */
308 map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
309 if (!map)
310 return -ENOMEM;
311
312 q->bufs[first]->map = map;
313 map->start = vma->vm_start;
314 map->end = vma->vm_end;
315 map->q = q;
316
317 q->bufs[first]->baddr = vma->vm_start;
318
319 mem = q->bufs[first]->priv;
320 BUG_ON(!mem);
321 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
322
323 mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
324 mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
325 &mem->dma_handle, GFP_KERNEL);
326 if (!mem->vaddr) {
327 dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
328 mem->size);
329 goto error;
330 }
331 dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
332 mem->vaddr, mem->size);
333
334 /* Try to remap memory */
335
336 size = vma->vm_end - vma->vm_start;
337 size = (size < mem->size) ? size : mem->size;
338
339 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
340 retval = remap_pfn_range(vma, vma->vm_start,
341 mem->dma_handle >> PAGE_SHIFT,
342 size, vma->vm_page_prot);
343 if (retval) {
344 dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
345 dma_free_coherent(q->dev, mem->size,
346 mem->vaddr, mem->dma_handle);
347 goto error;
348 }
349
350 vma->vm_ops = &videobuf_vm_ops;
351 vma->vm_flags |= VM_DONTEXPAND;
352 vma->vm_private_data = map;
353
354 dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
355 map, q, vma->vm_start, vma->vm_end,
356 (long int) q->bufs[first]->bsize,
357 vma->vm_pgoff, first);
358
359 videobuf_vm_open(vma);
360
361 return 0;
362
363error:
364 kfree(map);
365 return -ENOMEM;
366}
367
368static int __videobuf_copy_to_user(struct videobuf_queue *q,
369 char __user *data, size_t count,
370 int nonblocking)
371{
372 struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
373 void *vaddr;
374
375 BUG_ON(!mem);
376 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
377 BUG_ON(!mem->vaddr);
378
379 /* copy to userspace */
380 if (count > q->read_buf->size - q->read_off)
381 count = q->read_buf->size - q->read_off;
382
383 vaddr = mem->vaddr;
384
385 if (copy_to_user(data, vaddr + q->read_off, count))
386 return -EFAULT;
387
388 return count;
389}
390
/*
 * Copy a chunk of the current read buffer to user space for streaming
 * reads; optionally patches the frame counter into the vbi data first.
 * Returns bytes copied, or -EFAULT only when nothing was copied yet.
 */
static int __videobuf_copy_stream(struct videobuf_queue *q,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc;
	struct videobuf_dma_contig_memory *mem = q->read_buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * to all vbi decoding software out there ... */
		fc = (unsigned int *)mem->vaddr;
		fc += (q->read_buf->size >> 2) - 1;
		*fc = q->read_buf->field_count >> 1;
		dev_dbg(q->dev, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}
420
/* dma-contig implementation of the videobuf memory-handler interface */
static struct videobuf_qtype_ops qops = {
	.magic = MAGIC_QTYPE_OPS,

	.alloc = __videobuf_alloc,
	.iolock = __videobuf_iolock,
	.mmap_free = __videobuf_mmap_free,
	.mmap_mapper = __videobuf_mmap_mapper,
	.video_copy_to_user = __videobuf_copy_to_user,
	.copy_stream = __videobuf_copy_stream,
	.vmalloc = __videobuf_to_vmalloc,
};
432
/**
 * videobuf_queue_dma_contig_init() - initialize a queue for dma-contig
 * @q: queue to initialize
 * @ops: driver-supplied queue operations
 * @dev: device used for dma_alloc_coherent() and diagnostics
 * @irqlock: driver spinlock passed through to the videobuf core
 * @type: V4L2 buffer type of the queue
 * @field: initial field setting
 * @msize: size of the driver's per-buffer struct
 * @priv: driver private pointer
 *
 * Thin wrapper that plugs the dma-contig qops into the videobuf core.
 */
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
446
/* Return the DMA bus address of a buffer (for drivers to program hardware). */
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
457
/*
 * Free a USERPTR buffer's backing resources.  For a user-space pointer
 * only the bookkeeping is reset; memory allocated for the read() method
 * is returned to the DMA pool.  MMAP buffers are freed in
 * videobuf_vm_close() instead.
 */
void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise mmapped region
	   would be released, while still needed. In this case, the memory
	   release should happen inside videobuf_vm_close().
	   So, it should free memory only if the memory were allocated for
	   read() operation.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
	mem->vaddr = NULL;
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
488
/* module metadata */
MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
This page took 0.202468 seconds and 5 git commands to generate.