[media] Revert "[media] videobuf_vm_{open,close} race fixes"
drivers/media/v4l2-core/videobuf2-core.c (deliverable/linux.git)
1/*
2 * videobuf2-core.c - V4L2 driver helper framework
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
95072084 6 * Author: Pawel Osciak <pawel@osciak.com>
7 * Marek Szyprowski <m.szyprowski@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation.
12 */
13
14#include <linux/err.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mm.h>
18#include <linux/poll.h>
19#include <linux/slab.h>
20#include <linux/sched.h>
21
22#include <media/v4l2-dev.h>
23#include <media/v4l2-fh.h>
24#include <media/v4l2-event.h>
25#include <media/videobuf2-core.h>
26
27static int debug;
28module_param(debug, int, 0644);
29
30#define dprintk(level, fmt, arg...) \
31 do { \
32 if (debug >= level) \
33 printk(KERN_DEBUG "vb2: " fmt, ## arg); \
34 } while (0)
35
5931ffe3 36#define call_memop(q, op, args...) \
37 (((q)->mem_ops->op) ? \
38 ((q)->mem_ops->op(args)) : 0)
39
40#define call_qop(q, op, args...) \
41 (((q)->ops->op) ? ((q)->ops->op(args)) : 0)
42
1b18e7a0 43#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
2d86401c 44 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
45 V4L2_BUF_FLAG_PREPARED | \
46 V4L2_BUF_FLAG_TIMESTAMP_MASK)
ea42c8ec 47
48/**
49 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
50 */
c1426bc7 51static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
52{
53 struct vb2_queue *q = vb->vb2_queue;
54 void *mem_priv;
55 int plane;
56
57 /*
58 * Allocate memory for all planes in this buffer
59 * NOTE: mmapped areas should be page aligned
60 */
e23ccc0a 61 for (plane = 0; plane < vb->num_planes; ++plane) {
62 unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
63
5931ffe3 64 mem_priv = call_memop(q, alloc, q->alloc_ctx[plane],
7f841459 65 size, q->gfp_flags);
62a79436 66 if (IS_ERR_OR_NULL(mem_priv))
67 goto free;
68
69 /* Associate allocator private data with this plane */
70 vb->planes[plane].mem_priv = mem_priv;
c1426bc7 71 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
72 }
73
74 return 0;
75free:
76 /* Free already allocated memory if one of the allocations failed */
a00d0266 77 for (; plane > 0; --plane) {
5931ffe3 78 call_memop(q, put, vb->planes[plane - 1].mem_priv);
79 vb->planes[plane - 1].mem_priv = NULL;
80 }
81
82 return -ENOMEM;
83}
84
85/**
86 * __vb2_buf_mem_free() - free memory of the given buffer
87 */
88static void __vb2_buf_mem_free(struct vb2_buffer *vb)
89{
90 struct vb2_queue *q = vb->vb2_queue;
91 unsigned int plane;
92
93 for (plane = 0; plane < vb->num_planes; ++plane) {
5931ffe3 94 call_memop(q, put, vb->planes[plane].mem_priv);
e23ccc0a 95 vb->planes[plane].mem_priv = NULL;
96 dprintk(3, "Freed plane %d of buffer %d\n", plane,
97 vb->v4l2_buf.index);
98 }
99}
100
101/**
102 * __vb2_buf_userptr_put() - release userspace memory associated with
103 * a USERPTR buffer
104 */
105static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
106{
107 struct vb2_queue *q = vb->vb2_queue;
108 unsigned int plane;
109
110 for (plane = 0; plane < vb->num_planes; ++plane) {
111 if (vb->planes[plane].mem_priv)
112 call_memop(q, put_userptr, vb->planes[plane].mem_priv);
113 vb->planes[plane].mem_priv = NULL;
114 }
115}
116
117/**
118 * __vb2_plane_dmabuf_put() - release memory associated with
119 * a DMABUF shared plane
120 */
121static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
122{
123 if (!p->mem_priv)
124 return;
125
126 if (p->dbuf_mapped)
127 call_memop(q, unmap_dmabuf, p->mem_priv);
128
129 call_memop(q, detach_dmabuf, p->mem_priv);
130 dma_buf_put(p->dbuf);
131 memset(p, 0, sizeof(*p));
132}
133
134/**
135 * __vb2_buf_dmabuf_put() - release memory associated with
136 * a DMABUF shared buffer
137 */
138static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
139{
140 struct vb2_queue *q = vb->vb2_queue;
141 unsigned int plane;
142
143 for (plane = 0; plane < vb->num_planes; ++plane)
144 __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
145}
146
147/**
148 * __setup_lengths() - setup initial lengths for every plane in
149 * every buffer on the queue
150 */
151static void __setup_lengths(struct vb2_queue *q, unsigned int n)
152{
153 unsigned int buffer, plane;
154 struct vb2_buffer *vb;
155
156 for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
157 vb = q->bufs[buffer];
158 if (!vb)
159 continue;
160
161 for (plane = 0; plane < vb->num_planes; ++plane)
162 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
163 }
164}
165
166/**
167 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
168 * every buffer on the queue
169 */
2d86401c 170static void __setup_offsets(struct vb2_queue *q, unsigned int n)
171{
172 unsigned int buffer, plane;
173 struct vb2_buffer *vb;
2d86401c 174 unsigned long off;
e23ccc0a 175
176 if (q->num_buffers) {
177 struct v4l2_plane *p;
178 vb = q->bufs[q->num_buffers - 1];
179 p = &vb->v4l2_planes[vb->num_planes - 1];
180 off = PAGE_ALIGN(p->m.mem_offset + p->length);
181 } else {
182 off = 0;
183 }
184
185 for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
186 vb = q->bufs[buffer];
187 if (!vb)
188 continue;
189
190 for (plane = 0; plane < vb->num_planes; ++plane) {
191 vb->v4l2_planes[plane].m.mem_offset = off;
192
193 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
194 buffer, plane, off);
195
196 off += vb->v4l2_planes[plane].length;
197 off = PAGE_ALIGN(off);
198 }
199 }
200}
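/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * assuming PAGE_SIZE is 4096 and a fresh queue gets two MMAP buffers with
 * two planes each, where q->plane_sizes = { 3000, 1000 }, the loop above
 * hands out page-aligned mem_offset "cookies" as follows:
 *
 *	buffer 0, plane 0: mem_offset = 0x0000
 *	buffer 0, plane 1: mem_offset = 0x1000	(3000 rounded up to a page)
 *	buffer 1, plane 0: mem_offset = 0x2000
 *	buffer 1, plane 1: mem_offset = 0x3000
 *
 * Userspace later passes one of these offsets back as the mmap() offset
 * argument, which is how a plane is looked up when the buffer is mapped.
 */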
201
202/**
203 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
204 * video buffer memory for all buffers/planes on the queue and initialize the
205 * queue
206 *
207 * Returns the number of buffers successfully allocated.
208 */
209static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
c1426bc7 210 unsigned int num_buffers, unsigned int num_planes)
211{
212 unsigned int buffer;
213 struct vb2_buffer *vb;
214 int ret;
215
216 for (buffer = 0; buffer < num_buffers; ++buffer) {
217 /* Allocate videobuf buffer structures */
218 vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
219 if (!vb) {
220 dprintk(1, "Memory alloc for buffer struct failed\n");
221 break;
222 }
223
224 /* Length stores number of planes for multiplanar buffers */
225 if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
226 vb->v4l2_buf.length = num_planes;
227
228 vb->state = VB2_BUF_STATE_DEQUEUED;
229 vb->vb2_queue = q;
230 vb->num_planes = num_planes;
2d86401c 231 vb->v4l2_buf.index = q->num_buffers + buffer;
232 vb->v4l2_buf.type = q->type;
233 vb->v4l2_buf.memory = memory;
234
235 /* Allocate video buffer memory for the MMAP type */
236 if (memory == V4L2_MEMORY_MMAP) {
c1426bc7 237 ret = __vb2_buf_mem_alloc(vb);
238 if (ret) {
239 dprintk(1, "Failed allocating memory for "
240 "buffer %d\n", buffer);
241 kfree(vb);
242 break;
243 }
244 /*
245 * Call the driver-provided buffer initialization
246 * callback, if given. An error in initialization
247 * results in queue setup failure.
248 */
249 ret = call_qop(q, buf_init, vb);
250 if (ret) {
251 dprintk(1, "Buffer %d %p initialization"
252 " failed\n", buffer, vb);
253 __vb2_buf_mem_free(vb);
254 kfree(vb);
255 break;
256 }
257 }
258
2d86401c 259 q->bufs[q->num_buffers + buffer] = vb;
260 }
261
a5e3d743 262 __setup_lengths(q, buffer);
263 if (memory == V4L2_MEMORY_MMAP)
264 __setup_offsets(q, buffer);
265
266 dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
2d86401c 267 buffer, num_planes);
268
269 return buffer;
270}
271
272/**
273 * __vb2_free_mem() - release all video buffer memory for a given queue
274 */
2d86401c 275static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
276{
277 unsigned int buffer;
278 struct vb2_buffer *vb;
279
280 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
281 ++buffer) {
282 vb = q->bufs[buffer];
283 if (!vb)
284 continue;
285
286 /* Free MMAP buffers or release USERPTR buffers */
287 if (q->memory == V4L2_MEMORY_MMAP)
288 __vb2_buf_mem_free(vb);
289 else if (q->memory == V4L2_MEMORY_DMABUF)
290 __vb2_buf_dmabuf_put(vb);
291 else
292 __vb2_buf_userptr_put(vb);
293 }
294}
295
296/**
297 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
298 * related information. If no buffers are left, return the queue to an
299 * uninitialized state. Might be called even if the queue has already been freed.
e23ccc0a 300 */
63faabfd 301static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
302{
303 unsigned int buffer;
304
305 /*
306 * Sanity check: when preparing a buffer the queue lock is released for
307 * a short while (see __buf_prepare for the details), which would allow
308 * a race with a reqbufs which can call this function. Removing the
309 * buffers from underneath __buf_prepare is obviously a bad idea, so we
310 * check if any of the buffers is in the state PREPARING, and if so we
311 * just return -EAGAIN.
312 */
313 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
314 ++buffer) {
315 if (q->bufs[buffer] == NULL)
316 continue;
317 if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
318 dprintk(1, "reqbufs: preparing buffers, cannot free\n");
319 return -EAGAIN;
320 }
321 }
322
323 /* Call driver-provided cleanup function for each buffer, if provided */
324 if (q->ops->buf_cleanup) {
325 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
326 ++buffer) {
327 if (NULL == q->bufs[buffer])
328 continue;
329 q->ops->buf_cleanup(q->bufs[buffer]);
330 }
331 }
332
333 /* Release video buffer memory */
2d86401c 334 __vb2_free_mem(q, buffers);
335
336 /* Free videobuf buffers */
337 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
338 ++buffer) {
339 kfree(q->bufs[buffer]);
340 q->bufs[buffer] = NULL;
341 }
342
343 q->num_buffers -= buffers;
344 if (!q->num_buffers)
345 q->memory = 0;
bd50d999 346 INIT_LIST_HEAD(&q->queued_list);
63faabfd 347 return 0;
348}
349
350/**
351 * __verify_planes_array() - verify that the planes array passed in struct
352 * v4l2_buffer from userspace can be safely used
353 */
2d86401c 354static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
e23ccc0a 355{
356 if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
357 return 0;
358
359 /* Is memory for copying plane information present? */
360 if (NULL == b->m.planes) {
361 dprintk(1, "Multi-planar buffer passed but "
362 "planes array not provided\n");
363 return -EINVAL;
364 }
365
366 if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
367 dprintk(1, "Incorrect planes array length, "
368 "expected %d, got %d\n", vb->num_planes, b->length);
369 return -EINVAL;
370 }
371
372 return 0;
373}
374
375/**
376 * __verify_length() - Verify that the bytesused value for each plane fits in
377 * the plane length and that the data offset doesn't exceed the bytesused value.
378 */
379static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
380{
381 unsigned int length;
382 unsigned int plane;
383
384 if (!V4L2_TYPE_IS_OUTPUT(b->type))
385 return 0;
386
387 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
388 for (plane = 0; plane < vb->num_planes; ++plane) {
389 length = (b->memory == V4L2_MEMORY_USERPTR)
390 ? b->m.planes[plane].length
391 : vb->v4l2_planes[plane].length;
392
393 if (b->m.planes[plane].bytesused > length)
394 return -EINVAL;
395
396 if (b->m.planes[plane].data_offset > 0 &&
397 b->m.planes[plane].data_offset >=
398 b->m.planes[plane].bytesused)
399 return -EINVAL;
400 }
401 } else {
402 length = (b->memory == V4L2_MEMORY_USERPTR)
403 ? b->length : vb->v4l2_planes[0].length;
404
405 if (b->bytesused > length)
406 return -EINVAL;
407 }
408
409 return 0;
410}
411
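/*
 * Worked example (added for illustration, not in the original source):
 * for an OUTPUT multi-planar buffer whose plane 0 has length 4096, the
 * checks above behave as follows:
 *
 *	bytesused = 2048, data_offset = 128  -> accepted (128 < 2048 <= 4096)
 *	bytesused = 8192                     -> -EINVAL (exceeds plane length)
 *	bytesused = 100,  data_offset = 100  -> -EINVAL (a non-zero data_offset
 *						 must stay below bytesused)
 */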
412/**
413 * __buffer_in_use() - return true if the buffer is in use and
414 * the queue cannot be freed (by the means of REQBUFS(0)) call
415 */
416static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
417{
418 unsigned int plane;
419 for (plane = 0; plane < vb->num_planes; ++plane) {
2c2dd6ac 420 void *mem_priv = vb->planes[plane].mem_priv;
421 /*
422 * If num_users() has not been provided, call_memop
423 * will return 0, apparently nobody cares about this
424 * case anyway. If num_users() returns more than 1,
425 * we are not the only user of the plane's memory.
426 */
5931ffe3 427 if (mem_priv && call_memop(q, num_users, mem_priv) > 1)
428 return true;
429 }
430 return false;
431}
432
433/**
434 * __buffers_in_use() - return true if any buffers on the queue are in use and
435 * the queue cannot be freed (by the means of REQBUFS(0)) call
436 */
437static bool __buffers_in_use(struct vb2_queue *q)
438{
439 unsigned int buffer;
440 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
441 if (__buffer_in_use(q, q->bufs[buffer]))
442 return true;
443 }
444 return false;
445}
446
447/**
448 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
449 * returned to userspace
450 */
32a77260 451static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
452{
453 struct vb2_queue *q = vb->vb2_queue;
e23ccc0a 454
2b719d7b 455 /* Copy back data such as timestamp, flags, etc. */
e23ccc0a 456 memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
2b719d7b 457 b->reserved2 = vb->v4l2_buf.reserved2;
458 b->reserved = vb->v4l2_buf.reserved;
459
460 if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
461 /*
462 * Fill in plane-related data if userspace provided an array
32a77260 463 * for it. The caller has already verified memory and size.
e23ccc0a 464 */
3c0b6061 465 b->length = vb->num_planes;
466 memcpy(b->m.planes, vb->v4l2_planes,
467 b->length * sizeof(struct v4l2_plane));
468 } else {
469 /*
470 * We use length and offset in v4l2_planes array even for
471 * single-planar buffers, but userspace does not.
472 */
473 b->length = vb->v4l2_planes[0].length;
474 b->bytesused = vb->v4l2_planes[0].bytesused;
475 if (q->memory == V4L2_MEMORY_MMAP)
476 b->m.offset = vb->v4l2_planes[0].m.mem_offset;
477 else if (q->memory == V4L2_MEMORY_USERPTR)
478 b->m.userptr = vb->v4l2_planes[0].m.userptr;
479 else if (q->memory == V4L2_MEMORY_DMABUF)
480 b->m.fd = vb->v4l2_planes[0].m.fd;
481 }
482
483 /*
484 * Clear any buffer state related flags.
485 */
1b18e7a0 486 b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
6aa69f99 487 b->flags |= q->timestamp_type;
488
489 switch (vb->state) {
490 case VB2_BUF_STATE_QUEUED:
491 case VB2_BUF_STATE_ACTIVE:
492 b->flags |= V4L2_BUF_FLAG_QUEUED;
493 break;
494 case VB2_BUF_STATE_ERROR:
495 b->flags |= V4L2_BUF_FLAG_ERROR;
496 /* fall through */
497 case VB2_BUF_STATE_DONE:
498 b->flags |= V4L2_BUF_FLAG_DONE;
499 break;
ebc087d0 500 case VB2_BUF_STATE_PREPARED:
501 b->flags |= V4L2_BUF_FLAG_PREPARED;
502 break;
b18a8ff2 503 case VB2_BUF_STATE_PREPARING:
2d86401c 504 case VB2_BUF_STATE_DEQUEUED:
505 /* nothing */
506 break;
507 }
508
25a27d91 509 if (__buffer_in_use(q, vb))
e23ccc0a 510 b->flags |= V4L2_BUF_FLAG_MAPPED;
511}
512
513/**
514 * vb2_querybuf() - query video buffer information
515 * @q: videobuf queue
516 * @b: buffer struct passed from userspace to vidioc_querybuf handler
517 * in driver
518 *
519 * Should be called from vidioc_querybuf ioctl handler in driver.
520 * This function will verify the passed v4l2_buffer structure and fill the
521 * relevant information for the userspace.
522 *
523 * The return values from this function are intended to be directly returned
524 * from vidioc_querybuf handler in driver.
525 */
526int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
527{
528 struct vb2_buffer *vb;
32a77260 529 int ret;
530
531 if (b->type != q->type) {
532 dprintk(1, "querybuf: wrong buffer type\n");
533 return -EINVAL;
534 }
535
536 if (b->index >= q->num_buffers) {
537 dprintk(1, "querybuf: buffer index out of range\n");
538 return -EINVAL;
539 }
540 vb = q->bufs[b->index];
541 ret = __verify_planes_array(vb, b);
542 if (!ret)
543 __fill_v4l2_buffer(vb, b);
544 return ret;
545}
546EXPORT_SYMBOL(vb2_querybuf);
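/*
 * Usage sketch (illustrative only, not part of this file): a driver usually
 * forwards its vidioc_querybuf handler straight to vb2_querybuf(). The
 * my_dev structure and my_querybuf() name below are hypothetical.
 *
 * static int my_querybuf(struct file *file, void *priv, struct v4l2_buffer *b)
 * {
 *	struct my_dev *dev = video_drvdata(file);
 *
 *	return vb2_querybuf(&dev->vb_queue, b);
 * }
 */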
547
548/**
549 * __verify_userptr_ops() - verify that all memory operations required for
550 * USERPTR queue type have been provided
551 */
552static int __verify_userptr_ops(struct vb2_queue *q)
553{
554 if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
555 !q->mem_ops->put_userptr)
556 return -EINVAL;
557
558 return 0;
559}
560
561/**
562 * __verify_mmap_ops() - verify that all memory operations required for
563 * MMAP queue type have been provided
564 */
565static int __verify_mmap_ops(struct vb2_queue *q)
566{
567 if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
568 !q->mem_ops->put || !q->mem_ops->mmap)
569 return -EINVAL;
570
571 return 0;
572}
573
574/**
575 * __verify_dmabuf_ops() - verify that all memory operations required for
576 * DMABUF queue type have been provided
577 */
578static int __verify_dmabuf_ops(struct vb2_queue *q)
579{
580 if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
581 !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
582 !q->mem_ops->unmap_dmabuf)
583 return -EINVAL;
584
585 return 0;
586}
587
e23ccc0a 588/**
589 * __verify_memory_type() - Check whether the memory type and buffer type
590 * passed to a buffer operation are compatible with the queue.
591 */
592static int __verify_memory_type(struct vb2_queue *q,
593 enum v4l2_memory memory, enum v4l2_buf_type type)
594{
595 if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
596 memory != V4L2_MEMORY_DMABUF) {
597 dprintk(1, "reqbufs: unsupported memory type\n");
598 return -EINVAL;
599 }
600
601 if (type != q->type) {
602 dprintk(1, "reqbufs: requested type is incorrect\n");
603 return -EINVAL;
604 }
605
606 /*
607 * Make sure all the required memory ops for given memory type
608 * are available.
609 */
610 if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
611 dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
612 return -EINVAL;
613 }
614
615 if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
616 dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
617 return -EINVAL;
618 }
619
620 if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
621 dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
622 return -EINVAL;
623 }
624
625 /*
626 * Place the busy tests at the end: -EBUSY can be ignored when
627 * create_bufs is called with count == 0, but count == 0 should still
628 * do the memory and type validation.
629 */
630 if (q->fileio) {
631 dprintk(1, "reqbufs: file io in progress\n");
632 return -EBUSY;
633 }
634 return 0;
635}
636
637/**
638 * __reqbufs() - Initiate streaming
639 * @q: videobuf2 queue
640 * @req: struct passed from userspace to vidioc_reqbufs handler in driver
641 *
642 * Should be called from vidioc_reqbufs ioctl handler of a driver.
643 * This function:
644 * 1) verifies streaming parameters passed from the userspace,
645 * 2) sets up the queue,
646 * 3) negotiates number of buffers and planes per buffer with the driver
647 * to be used during streaming,
648 * 4) allocates internal buffer structures (struct vb2_buffer), according to
649 * the agreed parameters,
650 * 5) for MMAP memory type, allocates actual video memory, using the
651 * memory handling/allocation routines provided during queue initialization
652 *
653 * If req->count is 0, all the memory will be freed instead.
654 * If the queue has been allocated previously (by a previous vb2_reqbufs) call
655 * and the queue is not busy, memory will be reallocated.
656 *
657 * The return values from this function are intended to be directly returned
658 * from vidioc_reqbufs handler in driver.
659 */
37d9ed94 660static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
e23ccc0a 661{
2d86401c 662 unsigned int num_buffers, allocated_buffers, num_planes = 0;
37d9ed94 663 int ret;
664
665 if (q->streaming) {
666 dprintk(1, "reqbufs: streaming active\n");
667 return -EBUSY;
668 }
669
29e3fbd8 670 if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
671 /*
672 * We already have buffers allocated, so first check if they
673 * are not in use and can be freed.
674 */
675 if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
676 dprintk(1, "reqbufs: memory in use, cannot free\n");
677 return -EBUSY;
678 }
679
680 ret = __vb2_queue_free(q, q->num_buffers);
681 if (ret)
682 return ret;
683
684 /*
685 * In case of REQBUFS(0) return immediately without calling
686 * driver's queue_setup() callback and allocating resources.
687 */
688 if (req->count == 0)
689 return 0;
690 }
691
692 /*
693 * Make sure the requested values and current defaults are sane.
694 */
695 num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
c1426bc7 696 memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
e23ccc0a 697 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
13b14095 698 q->memory = req->memory;
699
700 /*
701 * Ask the driver how many buffers and planes per buffer it requires.
702 * Driver also sets the size and allocator context for each plane.
703 */
fc714e70 704 ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
c1426bc7 705 q->plane_sizes, q->alloc_ctx);
706 if (ret)
707 return ret;
708
709 /* Finally, allocate buffers and video memory */
c1426bc7 710 ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
711 if (ret == 0) {
712 dprintk(1, "Memory allocation failed\n");
713 return -ENOMEM;
714 }
715
716 allocated_buffers = ret;
717
718 /*
719 * Check if driver can handle the allocated number of buffers.
720 */
721 if (allocated_buffers < num_buffers) {
722 num_buffers = allocated_buffers;
e23ccc0a 723
724 ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
725 &num_planes, q->plane_sizes, q->alloc_ctx);
e23ccc0a 726
2d86401c 727 if (!ret && allocated_buffers < num_buffers)
e23ccc0a 728 ret = -ENOMEM;
729
730 /*
731 * Either the driver has accepted a smaller number of buffers,
732 * or .queue_setup() returned an error
e23ccc0a 733 */
734 }
735
736 q->num_buffers = allocated_buffers;
737
738 if (ret < 0) {
739 __vb2_queue_free(q, allocated_buffers);
740 return ret;
741 }
742
743 /*
744 * Return the number of successfully allocated buffers
745 * to the userspace.
746 */
2d86401c 747 req->count = allocated_buffers;
748
749 return 0;
e23ccc0a 750}
751
752/**
753 * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
754 * type values.
755 * @q: videobuf2 queue
756 * @req: struct passed from userspace to vidioc_reqbufs handler in driver
757 */
758int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
759{
760 int ret = __verify_memory_type(q, req->memory, req->type);
761
762 return ret ? ret : __reqbufs(q, req);
763}
764EXPORT_SYMBOL_GPL(vb2_reqbufs);
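/*
 * Usage sketch (illustrative, not part of the original file): a driver's
 * vidioc_reqbufs handler normally just forwards to vb2_reqbufs(); the queue
 * is assumed to have been set up earlier with vb2_queue_init(). The my_dev
 * and my_reqbufs() names are hypothetical.
 *
 * static int my_reqbufs(struct file *file, void *priv,
 *			 struct v4l2_requestbuffers *req)
 * {
 *	struct my_dev *dev = video_drvdata(file);
 *
 *	return vb2_reqbufs(&dev->vb_queue, req);
 * }
 */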
765
2d86401c 766/**
37d9ed94 767 * __create_bufs() - Allocate buffers and any required auxiliary structs
768 * @q: videobuf2 queue
769 * @create: creation parameters, passed from userspace to vidioc_create_bufs
770 * handler in driver
771 *
772 * Should be called from vidioc_create_bufs ioctl handler of a driver.
773 * This function:
774 * 1) verifies parameter sanity
775 * 2) calls the .queue_setup() queue operation
776 * 3) performs any necessary memory allocations
777 *
778 * The return values from this function are intended to be directly returned
779 * from vidioc_create_bufs handler in driver.
780 */
37d9ed94 781static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
782{
783 unsigned int num_planes = 0, num_buffers, allocated_buffers;
37d9ed94 784 int ret;
785
786 if (q->num_buffers == VIDEO_MAX_FRAME) {
787 dprintk(1, "%s(): maximum number of buffers already allocated\n",
788 __func__);
789 return -ENOBUFS;
790 }
791
792 if (!q->num_buffers) {
793 memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
794 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
795 q->memory = create->memory;
796 }
797
798 num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
799
800 /*
801 * Ask the driver whether the requested number of buffers, planes per
802 * buffer and their sizes are acceptable
803 */
804 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
805 &num_planes, q->plane_sizes, q->alloc_ctx);
806 if (ret)
807 return ret;
808
809 /* Finally, allocate buffers and video memory */
810 ret = __vb2_queue_alloc(q, create->memory, num_buffers,
811 num_planes);
812 if (ret == 0) {
813 dprintk(1, "Memory allocation failed\n");
814 return -ENOMEM;
815 }
816
817 allocated_buffers = ret;
818
819 /*
820 * Check if driver can handle the so far allocated number of buffers.
821 */
822 if (ret < num_buffers) {
823 num_buffers = ret;
824
825 /*
826 * q->num_buffers contains the total number of buffers, that the
827 * queue driver has set up
828 */
829 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
830 &num_planes, q->plane_sizes, q->alloc_ctx);
831
832 if (!ret && allocated_buffers < num_buffers)
833 ret = -ENOMEM;
834
835 /*
836 * Either the driver has accepted a smaller number of buffers,
837 * or .queue_setup() returned an error
838 */
839 }
840
841 q->num_buffers += allocated_buffers;
842
843 if (ret < 0) {
844 __vb2_queue_free(q, allocated_buffers);
f05393d2 845 return -ENOMEM;
846 }
847
848 /*
849 * Return the number of successfully allocated buffers
850 * to the userspace.
851 */
852 create->count = allocated_buffers;
853
854 return 0;
855}
856
857/**
858 * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
859 * memory and type values.
860 * @q: videobuf2 queue
861 * @create: creation parameters, passed from userspace to vidioc_create_bufs
862 * handler in driver
863 */
864int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
865{
866 int ret = __verify_memory_type(q, create->memory, create->format.type);
867
868 create->index = q->num_buffers;
869 if (create->count == 0)
870 return ret != -EBUSY ? ret : 0;
871 return ret ? ret : __create_bufs(q, create);
872}
873EXPORT_SYMBOL_GPL(vb2_create_bufs);
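/*
 * Usage sketch (illustrative): the CREATE_BUFS ioctl path mirrors REQBUFS and
 * adds buffers on top of those already allocated. Hypothetical driver code:
 *
 * static int my_create_bufs(struct file *file, void *priv,
 *			     struct v4l2_create_buffers *create)
 * {
 *	struct my_dev *dev = video_drvdata(file);
 *
 *	return vb2_create_bufs(&dev->vb_queue, create);
 * }
 */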
874
875/**
876 * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
877 * @vb: vb2_buffer to which the plane in question belongs
878 * @plane_no: plane number for which the address is to be returned
879 *
880 * This function returns a kernel virtual address of a given plane if
882 * such a mapping exists, NULL otherwise.
882 */
883void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
884{
885 struct vb2_queue *q = vb->vb2_queue;
886
887	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
888 return NULL;
889
5931ffe3 890 return call_memop(q, vaddr, vb->planes[plane_no].mem_priv);
891
892}
893EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
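/*
 * Usage sketch (illustrative): a driver that touches buffer contents with the
 * CPU (for instance to clear a plane before capture) can do so from its
 * buf_prepare() op. This assumes an allocator that implements the vaddr
 * memop (e.g. the vmalloc or dma-contig allocators); names are hypothetical.
 *
 * static int my_buf_prepare(struct vb2_buffer *vb)
 * {
 *	void *vaddr = vb2_plane_vaddr(vb, 0);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	memset(vaddr, 0, vb2_plane_size(vb, 0));
 *	return 0;
 * }
 */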
894
895/**
896 * vb2_plane_cookie() - Return allocator specific cookie for the given plane
897 * @vb: vb2_buffer to which the plane in question belongs
898 * @plane_no: plane number for which the cookie is to be returned
899 *
900 * This function returns an allocator specific cookie for a given plane if
901 * available, NULL otherwise. The allocator should provide some simple static
902 * inline function, which would convert this cookie to the allocator specific
903 * type that can be used directly by the driver to access the buffer. This can
904 * be for example physical address, pointer to scatter list or IOMMU mapping.
905 */
906void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
907{
908 struct vb2_queue *q = vb->vb2_queue;
909
910	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
911 return NULL;
912
5931ffe3 913 return call_memop(q, cookie, vb->planes[plane_no].mem_priv);
914}
915EXPORT_SYMBOL_GPL(vb2_plane_cookie);
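/*
 * Usage sketch (illustrative, with assumptions): for the dma-contig allocator
 * the cookie is a dma_addr_t, and videobuf2-dma-contig.h provides a small
 * inline wrapper around this call, so a driver would typically program its
 * DMA engine along these lines (my_hw_set_addr() is hypothetical):
 *
 *	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 *
 *	my_hw_set_addr(dev, addr);
 */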
916
917/**
918 * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
919 * @vb: vb2_buffer returned from the driver
920 * @state: either VB2_BUF_STATE_DONE if the operation finished successfully
921 * or VB2_BUF_STATE_ERROR if the operation finished with an error
922 *
923 * This function should be called by the driver after a hardware operation on
924 * a buffer is finished and the buffer may be returned to userspace. The driver
925 * cannot use this buffer anymore until it is queued back to it by videobuf
926 * by the means of buf_queue callback. Only buffers previously queued to the
927 * driver by buf_queue can be passed to this function.
928 */
929void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
930{
931 struct vb2_queue *q = vb->vb2_queue;
932 unsigned long flags;
3e0c2f20 933 unsigned int plane;
934
935 if (vb->state != VB2_BUF_STATE_ACTIVE)
936 return;
937
938 if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
939 return;
940
941 dprintk(4, "Done processing on buffer %d, state: %d\n",
9b6f5dc0 942 vb->v4l2_buf.index, state);
e23ccc0a 943
944 /* sync buffers */
945 for (plane = 0; plane < vb->num_planes; ++plane)
946 call_memop(q, finish, vb->planes[plane].mem_priv);
947
948 /* Add the buffer to the done buffers list */
949 spin_lock_irqsave(&q->done_lock, flags);
950 vb->state = state;
951 list_add_tail(&vb->done_entry, &q->done_list);
952 atomic_dec(&q->queued_count);
953 spin_unlock_irqrestore(&q->done_lock, flags);
954
955 /* Inform any processes that may be waiting for buffers */
956 wake_up(&q->done_wq);
957}
958EXPORT_SYMBOL_GPL(vb2_buffer_done);
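/*
 * Usage sketch (illustrative, hypothetical driver code): the buf_queue op
 * usually stores the buffer on a driver-private list protected by a spinlock,
 * and the interrupt handler returns the finished buffer with vb2_buffer_done().
 *
 * static void my_buf_queue(struct vb2_buffer *vb)
 * {
 *	struct my_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->slock, flags);
 *	list_add_tail(&to_my_buf(vb)->list, &dev->buf_list);
 *	spin_unlock_irqrestore(&dev->slock, flags);
 * }
 *
 * Then, in the interrupt handler, once the hardware has finished with "buf":
 *
 *	vb2_buffer_done(&buf->vb, error ? VB2_BUF_STATE_ERROR :
 *					  VB2_BUF_STATE_DONE);
 */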
959
960/**
961 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
962 * v4l2_buffer by the userspace. The caller has already verified that struct
963 * v4l2_buffer has a valid number of planes.
e23ccc0a 964 */
32a77260 965static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
966 struct v4l2_plane *v4l2_planes)
967{
968 unsigned int plane;
969
970 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
971 /* Fill in driver-provided information for OUTPUT types */
972 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
973 /*
974 * Will have to go up to b->length when API starts
975 * accepting variable number of planes.
976 */
977 for (plane = 0; plane < vb->num_planes; ++plane) {
978 v4l2_planes[plane].bytesused =
979 b->m.planes[plane].bytesused;
980 v4l2_planes[plane].data_offset =
981 b->m.planes[plane].data_offset;
982 }
983 }
984
985 if (b->memory == V4L2_MEMORY_USERPTR) {
986 for (plane = 0; plane < vb->num_planes; ++plane) {
987 v4l2_planes[plane].m.userptr =
988 b->m.planes[plane].m.userptr;
989 v4l2_planes[plane].length =
990 b->m.planes[plane].length;
991 }
992 }
993 if (b->memory == V4L2_MEMORY_DMABUF) {
994 for (plane = 0; plane < vb->num_planes; ++plane) {
995 v4l2_planes[plane].m.fd =
996 b->m.planes[plane].m.fd;
997 v4l2_planes[plane].length =
998 b->m.planes[plane].length;
999 v4l2_planes[plane].data_offset =
1000 b->m.planes[plane].data_offset;
1001 }
1002 }
1003 } else {
1004 /*
1005 * Single-planar buffers do not use planes array,
1006 * so fill in relevant v4l2_buffer struct fields instead.
1007 * In videobuf we use our internal V4l2_planes struct for
1008 * single-planar buffers as well, for simplicity.
1009 */
ac706bf7 1010 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
e23ccc0a 1011 v4l2_planes[0].bytesused = b->bytesused;
1012 v4l2_planes[0].data_offset = 0;
1013 }
1014
1015 if (b->memory == V4L2_MEMORY_USERPTR) {
1016 v4l2_planes[0].m.userptr = b->m.userptr;
1017 v4l2_planes[0].length = b->length;
1018 }
1019
1020 if (b->memory == V4L2_MEMORY_DMABUF) {
1021 v4l2_planes[0].m.fd = b->m.fd;
1022 v4l2_planes[0].length = b->length;
1023 v4l2_planes[0].data_offset = 0;
1024 }
1025
1026 }
1027
1028 vb->v4l2_buf.field = b->field;
1029 vb->v4l2_buf.timestamp = b->timestamp;
1b18e7a0 1030 vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
1031}
1032
1033/**
1034 * __qbuf_userptr() - handle qbuf of a USERPTR buffer
1035 */
2d86401c 1036static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1037{
1038 struct v4l2_plane planes[VIDEO_MAX_PLANES];
1039 struct vb2_queue *q = vb->vb2_queue;
1040 void *mem_priv;
1041 unsigned int plane;
1042 int ret;
1043 int write = !V4L2_TYPE_IS_OUTPUT(q->type);
1044
1045 /* Copy relevant information provided by the userspace */
1046 __fill_vb2_buffer(vb, b, planes);
e23ccc0a
PO
1047
1048 for (plane = 0; plane < vb->num_planes; ++plane) {
1049 /* Skip the plane if already verified */
1050 if (vb->v4l2_planes[plane].m.userptr &&
1051 vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
1052 && vb->v4l2_planes[plane].length == planes[plane].length)
1053 continue;
1054
1055 dprintk(3, "qbuf: userspace address for plane %d changed, "
1056 "reacquiring memory\n", plane);
1057
1058 /* Check if the provided plane buffer is large enough */
1059 if (planes[plane].length < q->plane_sizes[plane]) {
1060 dprintk(1, "qbuf: provided buffer size %u is less than "
1061 "setup size %u for plane %d\n",
1062 planes[plane].length,
1063 q->plane_sizes[plane], plane);
4c2625db 1064 ret = -EINVAL;
1065 goto err;
1066 }
1067
1068 /* Release previously acquired memory if present */
1069 if (vb->planes[plane].mem_priv)
5931ffe3 1070 call_memop(q, put_userptr, vb->planes[plane].mem_priv);
1071
1072 vb->planes[plane].mem_priv = NULL;
1073 vb->v4l2_planes[plane].m.userptr = 0;
1074 vb->v4l2_planes[plane].length = 0;
1075
1076 /* Acquire each plane's memory */
1077 mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane],
1078 planes[plane].m.userptr,
1079 planes[plane].length, write);
1080 if (IS_ERR_OR_NULL(mem_priv)) {
1081 dprintk(1, "qbuf: failed acquiring userspace "
e23ccc0a 1082 "memory for plane %d\n", plane);
1083 ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
1084 goto err;
e23ccc0a 1085 }
a00d0266 1086 vb->planes[plane].mem_priv = mem_priv;
1087 }
1088
1089 /*
1090 * Call driver-specific initialization on the newly acquired buffer,
1091 * if provided.
1092 */
1093 ret = call_qop(q, buf_init, vb);
1094 if (ret) {
1095 dprintk(1, "qbuf: buffer initialization failed\n");
1096 goto err;
1097 }
1098
1099 /*
1100 * Now that everything is in order, copy relevant information
1101 * provided by userspace.
1102 */
1103 for (plane = 0; plane < vb->num_planes; ++plane)
1104 vb->v4l2_planes[plane] = planes[plane];
1105
1106 return 0;
1107err:
1108 /* In case of errors, release planes that were already acquired */
1109 for (plane = 0; plane < vb->num_planes; ++plane) {
1110 if (vb->planes[plane].mem_priv)
5931ffe3 1111 call_memop(q, put_userptr, vb->planes[plane].mem_priv);
1112 vb->planes[plane].mem_priv = NULL;
1113 vb->v4l2_planes[plane].m.userptr = 0;
1114 vb->v4l2_planes[plane].length = 0;
1115 }
1116
1117 return ret;
1118}
1119
1120/**
1121 * __qbuf_mmap() - handle qbuf of an MMAP buffer
1122 */
2d86401c 1123static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
e23ccc0a 1124{
1125 __fill_vb2_buffer(vb, b, vb->v4l2_planes);
1126 return 0;
1127}
1128
1129/**
1130 * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
1131 */
1132static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1133{
1134 struct v4l2_plane planes[VIDEO_MAX_PLANES];
1135 struct vb2_queue *q = vb->vb2_queue;
1136 void *mem_priv;
1137 unsigned int plane;
1138 int ret;
1139 int write = !V4L2_TYPE_IS_OUTPUT(q->type);
1140
6f546c5f 1141 /* Copy relevant information provided by the userspace */
1142 __fill_vb2_buffer(vb, b, planes);
1143
1144 for (plane = 0; plane < vb->num_planes; ++plane) {
1145 struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
1146
1147 if (IS_ERR_OR_NULL(dbuf)) {
1148 dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
1149 plane);
1150 ret = -EINVAL;
1151 goto err;
1152 }
1153
1154 /* use DMABUF size if length is not provided */
1155 if (planes[plane].length == 0)
1156 planes[plane].length = dbuf->size;
1157
1158 if (planes[plane].length < planes[plane].data_offset +
1159 q->plane_sizes[plane]) {
1160 dprintk(1, "qbuf: invalid dmabuf length for plane %d\n",
1161 plane);
1162 ret = -EINVAL;
1163 goto err;
1164 }
1165
1166 /* Skip the plane if already verified */
1167 if (dbuf == vb->planes[plane].dbuf &&
1168 vb->v4l2_planes[plane].length == planes[plane].length) {
1169 dma_buf_put(dbuf);
1170 continue;
1171 }
1172
1173 dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
1174
1175 /* Release previously acquired memory if present */
1176 __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
1177 memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
1178
1179 /* Acquire each plane's memory */
1180 mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
1181 dbuf, planes[plane].length, write);
1182 if (IS_ERR(mem_priv)) {
1183 dprintk(1, "qbuf: failed to attach dmabuf\n");
1184 ret = PTR_ERR(mem_priv);
1185 dma_buf_put(dbuf);
1186 goto err;
1187 }
1188
1189 vb->planes[plane].dbuf = dbuf;
1190 vb->planes[plane].mem_priv = mem_priv;
1191 }
1192
1193 /* TODO: This pins the buffer(s) with dma_buf_map_attachment(), but
1194 * really we want to do this just before the DMA, not while queueing
1195 * the buffer(s)..
1196 */
1197 for (plane = 0; plane < vb->num_planes; ++plane) {
1198 ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
1199 if (ret) {
1200 dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
1201 plane);
1202 goto err;
1203 }
1204 vb->planes[plane].dbuf_mapped = 1;
1205 }
1206
1207 /*
1208 * Call driver-specific initialization on the newly acquired buffer,
1209 * if provided.
1210 */
1211 ret = call_qop(q, buf_init, vb);
1212 if (ret) {
1213 dprintk(1, "qbuf: buffer initialization failed\n");
1214 goto err;
1215 }
1216
1217 /*
1218 * Now that everything is in order, copy relevant information
1219 * provided by userspace.
1220 */
1221 for (plane = 0; plane < vb->num_planes; ++plane)
1222 vb->v4l2_planes[plane] = planes[plane];
1223
1224 return 0;
1225err:
1226 /* In case of errors, release planes that were already acquired */
1227 __vb2_buf_dmabuf_put(vb);
1228
1229 return ret;
1230}
1231
1232/**
1233 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
1234 */
1235static void __enqueue_in_driver(struct vb2_buffer *vb)
1236{
1237 struct vb2_queue *q = vb->vb2_queue;
3e0c2f20 1238 unsigned int plane;
1239
1240 vb->state = VB2_BUF_STATE_ACTIVE;
1241 atomic_inc(&q->queued_count);
1242
1243 /* sync buffers */
1244 for (plane = 0; plane < vb->num_planes; ++plane)
1245 call_memop(q, prepare, vb->planes[plane].mem_priv);
1246
1247 q->ops->buf_queue(vb);
1248}
1249
2d86401c 1250static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1251{
1252 struct vb2_queue *q = vb->vb2_queue;
b18a8ff2 1253 struct rw_semaphore *mmap_sem;
1254 int ret;
1255
8023ed09 1256 ret = __verify_length(vb, b);
1257 if (ret < 0) {
1258 dprintk(1, "%s(): plane parameters verification failed: %d\n",
1259 __func__, ret);
8023ed09 1260 return ret;
3a9621b0 1261 }
8023ed09 1262
b18a8ff2 1263 vb->state = VB2_BUF_STATE_PREPARING;
1264 switch (q->memory) {
1265 case V4L2_MEMORY_MMAP:
1266 ret = __qbuf_mmap(vb, b);
1267 break;
1268 case V4L2_MEMORY_USERPTR:
b18a8ff2 1269 /*
1270 * In case of user pointer buffers vb2 allocators need to get
1271 * direct access to userspace pages. This requires getting
1272 * the mmap semaphore for read access in the current process
1273 * structure. The same semaphore is taken before calling mmap
1274 * operation, while both qbuf/prepare_buf and mmap are called
1275 * by the driver or v4l2 core with the driver's lock held.
1276 * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
1277 * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
1278 * the videobuf2 core releases the driver's lock, takes
1279 * mmap_sem and then takes the driver's lock again.
1280 */
1281 mmap_sem = &current->mm->mmap_sem;
1282 call_qop(q, wait_prepare, q);
1283 down_read(mmap_sem);
1284 call_qop(q, wait_finish, q);
1285
ebc087d0 1286 ret = __qbuf_userptr(vb, b);
1287
1288 up_read(mmap_sem);
ebc087d0 1289 break;
1290 case V4L2_MEMORY_DMABUF:
1291 ret = __qbuf_dmabuf(vb, b);
1292 break;
1293 default:
1294 WARN(1, "Invalid queue type\n");
1295 ret = -EINVAL;
1296 }
1297
1298 if (!ret)
1299 ret = call_qop(q, buf_prepare, vb);
1300 if (ret)
1301 dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
b18a8ff2 1302 vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
1303
1304 return ret;
1305}
1306
012043b8 1307static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
4138111a 1308 const char *opname)
2d86401c 1309{
2d86401c 1310 if (b->type != q->type) {
012043b8 1311 dprintk(1, "%s(): invalid buffer type\n", opname);
b18a8ff2 1312 return -EINVAL;
1313 }
1314
1315 if (b->index >= q->num_buffers) {
012043b8 1316 dprintk(1, "%s(): buffer index out of range\n", opname);
b18a8ff2 1317 return -EINVAL;
1318 }
1319
4138111a 1320 if (q->bufs[b->index] == NULL) {
2d86401c 1321 /* Should never happen */
012043b8 1322 dprintk(1, "%s(): buffer is NULL\n", opname);
b18a8ff2 1323 return -EINVAL;
1324 }
1325
1326 if (b->memory != q->memory) {
012043b8 1327 dprintk(1, "%s(): invalid memory type\n", opname);
b18a8ff2 1328 return -EINVAL;
1329 }
1330
4138111a 1331 return __verify_planes_array(q->bufs[b->index], b);
012043b8 1332}
2d86401c 1333
e23ccc0a 1334/**
012043b8 1335 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
e23ccc0a 1336 * @q: videobuf2 queue
1337 * @b: buffer structure passed from userspace to vidioc_prepare_buf
1338 * handler in driver
e23ccc0a 1339 *
012043b8 1340 * Should be called from vidioc_prepare_buf ioctl handler of a driver.
1341 * This function:
1342 * 1) verifies the passed buffer,
1343 * 2) calls buf_prepare callback in the driver (if provided), in which
1344 * driver-specific buffer initialization can be performed,
1345 *
1346 * The return values from this function are intended to be directly returned
012043b8 1347 * from vidioc_prepare_buf handler in driver.
e23ccc0a 1348 */
012043b8 1349int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
e23ccc0a 1350{
4138111a 1351 struct vb2_buffer *vb;
1352 int ret;
1353
1354 if (q->fileio) {
1355 dprintk(1, "%s(): file io in progress\n", __func__);
1356 return -EBUSY;
1357 }
4138111a 1358
b2f2f047 1359 ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
1360 if (ret)
1361 return ret;
1362
1363 vb = q->bufs[b->index];
1364 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1365 dprintk(1, "%s(): invalid buffer state %d\n", __func__,
1366 vb->state);
1367 return -EINVAL;
1368 }
1369
1370 ret = __buf_prepare(vb, b);
1371 if (!ret) {
1372 /* Fill buffer information for the userspace */
1373 __fill_v4l2_buffer(vb, b);
1374
1375 dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
1376 }
1377 return ret;
1378}
1379EXPORT_SYMBOL_GPL(vb2_prepare_buf);
e23ccc0a 1380
1381/**
1382 * vb2_start_streaming() - Attempt to start streaming.
1383 * @q: videobuf2 queue
1384 *
1385 * If there are not enough buffers, then retry_start_streaming is set to
1386 * 1 and 0 is returned. The next time a buffer is queued and
1387 * retry_start_streaming is 1, this function will be called again to
1388 * retry starting the DMA engine.
1389 */
1390static int vb2_start_streaming(struct vb2_queue *q)
1391{
1392 int ret;
1393
1394 /* Tell the driver to start streaming */
1395 ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
1396
1397 /*
1398 * If there are not enough buffers queued to start streaming, then
1399 * the start_streaming operation will return -ENOBUFS and you have to
1400 * retry when the next buffer is queued.
1401 */
1402 if (ret == -ENOBUFS) {
1403 dprintk(1, "qbuf: not enough buffers, retry when more buffers are queued.\n");
1404 q->retry_start_streaming = 1;
1405 return 0;
1406 }
1407 if (ret)
1408 dprintk(1, "qbuf: driver refused to start streaming\n");
1409 else
1410 q->retry_start_streaming = 0;
1411 return ret;
1412}
1413
b2f2f047 1414static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
012043b8 1415{
1416 int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
1417 struct vb2_buffer *vb;
1418
1419 if (ret)
1420 return ret;
1421
1422 vb = q->bufs[b->index];
1423 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1424 dprintk(1, "%s(): invalid buffer state %d\n", __func__,
1425 vb->state);
1426 return -EINVAL;
1427 }
e23ccc0a 1428
1429 switch (vb->state) {
1430 case VB2_BUF_STATE_DEQUEUED:
1431 ret = __buf_prepare(vb, b);
1432 if (ret)
012043b8 1433 return ret;
4138111a 1434 break;
1435 case VB2_BUF_STATE_PREPARED:
1436 break;
1437 case VB2_BUF_STATE_PREPARING:
1438 dprintk(1, "qbuf: buffer still being prepared\n");
1439 return -EINVAL;
ebc087d0 1440 default:
e23ccc0a 1441 dprintk(1, "qbuf: buffer already in use\n");
012043b8 1442 return -EINVAL;
1443 }
1444
1445 /*
1446 * Add to the queued buffers list, a buffer will stay on it until
1447 * dequeued in dqbuf.
1448 */
1449 list_add_tail(&vb->queued_entry, &q->queued_list);
1450 vb->state = VB2_BUF_STATE_QUEUED;
1451
1452 /*
1453 * If already streaming, give the buffer to driver for processing.
1454 * If not, the buffer will be given to driver on next streamon.
1455 */
1456 if (q->streaming)
1457 __enqueue_in_driver(vb);
1458
1459 /* Fill buffer information for the userspace */
1460 __fill_v4l2_buffer(vb, b);
21db3e07 1461
1462 if (q->retry_start_streaming) {
1463 ret = vb2_start_streaming(q);
1464 if (ret)
1465 return ret;
1466 }
1467
1468 dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
1469 return 0;
e23ccc0a 1470}
1471
1472/**
1473 * vb2_qbuf() - Queue a buffer from userspace
1474 * @q: videobuf2 queue
1475 * @b: buffer structure passed from userspace to vidioc_qbuf handler
1476 * in driver
1477 *
1478 * Should be called from vidioc_qbuf ioctl handler of a driver.
1479 * This function:
1480 * 1) verifies the passed buffer,
1481 * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
1482 * which driver-specific buffer initialization can be performed,
1483 * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
1484 * callback for processing.
1485 *
1486 * The return values from this function are intended to be directly returned
1487 * from vidioc_qbuf handler in driver.
1488 */
1489int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1490{
1491 if (q->fileio) {
1492 dprintk(1, "%s(): file io in progress\n", __func__);
1493 return -EBUSY;
1494 }
1495
1496 return vb2_internal_qbuf(q, b);
1497}
1498EXPORT_SYMBOL_GPL(vb2_qbuf);
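/*
 * Usage sketch (illustrative): as with reqbufs, drivers usually forward the
 * ioctl directly; serializing against other queue operations is left to the
 * driver or the v4l2 core lock. Hypothetical handler:
 *
 * static int my_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
 * {
 *	struct my_dev *dev = video_drvdata(file);
 *
 *	return vb2_qbuf(&dev->vb_queue, b);
 * }
 */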
1499
1500/**
1501 * __vb2_wait_for_done_vb() - wait for a buffer to become available
1502 * for dequeuing
1503 *
1504 * Will sleep if required for nonblocking == false.
1505 */
1506static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1507{
1508 /*
1509 * All operations on vb_done_list are performed under done_lock
1510 * spinlock protection. However, buffers may be removed from
1511 * it and returned to userspace only while holding both driver's
1512 * lock and the done_lock spinlock. Thus we can be sure that as
1513 * long as we hold the driver's lock, the list will remain not
1514 * empty if list_empty() check succeeds.
1515 */
1516
1517 for (;;) {
1518 int ret;
1519
1520 if (!q->streaming) {
1521 dprintk(1, "Streaming off, will not wait for buffers\n");
1522 return -EINVAL;
1523 }
1524
1525 if (!list_empty(&q->done_list)) {
1526 /*
1527 * Found a buffer that we were waiting for.
1528 */
1529 break;
1530 }
1531
1532 if (nonblocking) {
1533 dprintk(1, "Nonblocking and no buffers to dequeue, "
1534 "will not wait\n");
1535 return -EAGAIN;
1536 }
1537
1538 /*
1539 * We are streaming and blocking, wait for another buffer to
1540 * become ready or for streamoff. Driver's lock is released to
1541 * allow streamoff or qbuf to be called while waiting.
1542 */
1543 call_qop(q, wait_prepare, q);
1544
1545 /*
1546 * All locks have been released, it is safe to sleep now.
1547 */
1548 dprintk(3, "Will sleep waiting for buffers\n");
1549 ret = wait_event_interruptible(q->done_wq,
1550 !list_empty(&q->done_list) || !q->streaming);
1551
1552 /*
1553 * We need to reevaluate both conditions again after reacquiring
1554 * the locks or return an error if one occurred.
1555 */
1556 call_qop(q, wait_finish, q);
1557 if (ret) {
1558 dprintk(1, "Sleep was interrupted\n");
e23ccc0a 1559 return ret;
32a77260 1560 }
1561 }
1562 return 0;
1563}
1564
1565/**
1566 * __vb2_get_done_vb() - get a buffer ready for dequeuing
1567 *
1568 * Will sleep if required for nonblocking == false.
1569 */
1570static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
32a77260 1571 struct v4l2_buffer *b, int nonblocking)
1572{
1573 unsigned long flags;
1574 int ret;
1575
1576 /*
1577 * Wait for at least one buffer to become available on the done_list.
1578 */
1579 ret = __vb2_wait_for_done_vb(q, nonblocking);
1580 if (ret)
1581 return ret;
1582
1583 /*
1584 * Driver's lock has been held since we last verified that done_list
1585 * is not empty, so no need for another list_empty(done_list) check.
1586 */
1587 spin_lock_irqsave(&q->done_lock, flags);
1588 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
1589 /*
1590 * Only remove the buffer from done_list if v4l2_buffer can handle all
1591 * the planes.
1592 */
1593 ret = __verify_planes_array(*vb, b);
1594 if (!ret)
1595 list_del(&(*vb)->done_entry);
1596 spin_unlock_irqrestore(&q->done_lock, flags);
1597
32a77260 1598 return ret;
1599}
1600
1601/**
1602 * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
1603 * @q: videobuf2 queue
1604 *
1605 * This function will wait until all buffers that have been given to the driver
1606 * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
1607 * wait_prepare, wait_finish pair. It is intended to be called with all locks
1608 * taken, for example from stop_streaming() callback.
1609 */
1610int vb2_wait_for_all_buffers(struct vb2_queue *q)
1611{
1612 if (!q->streaming) {
1613 dprintk(1, "Streaming off, will not wait for buffers\n");
1614 return -EINVAL;
1615 }
1616
1617 if (!q->retry_start_streaming)
1618 wait_event(q->done_wq, !atomic_read(&q->queued_count));
1619 return 0;
1620}
1621EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
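/*
 * Usage sketch (illustrative, with assumptions about the driver side): a
 * driver's stop_streaming op can use this call to make sure every buffer
 * handed to the hardware has been returned via vb2_buffer_done() before the
 * queue is torn down. my_hw_stop() and the my_dev structure are hypothetical;
 * the int return type matches this version of the stop_streaming op.
 *
 * static int my_stop_streaming(struct vb2_queue *q)
 * {
 *	struct my_dev *dev = vb2_get_drv_priv(q);
 *
 *	my_hw_stop(dev);		// finish or abort any in-flight DMA
 *	vb2_wait_for_all_buffers(q);	// all buffers are now back in vb2
 *	return 0;
 * }
 */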
1622
1623/**
1624 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
1625 */
1626static void __vb2_dqbuf(struct vb2_buffer *vb)
1627{
1628 struct vb2_queue *q = vb->vb2_queue;
1629 unsigned int i;
1630
1631 /* nothing to do if the buffer is already dequeued */
1632 if (vb->state == VB2_BUF_STATE_DEQUEUED)
1633 return;
1634
1635 vb->state = VB2_BUF_STATE_DEQUEUED;
1636
1637 /* unmap DMABUF buffer */
1638 if (q->memory == V4L2_MEMORY_DMABUF)
1639 for (i = 0; i < vb->num_planes; ++i) {
1640 if (!vb->planes[i].dbuf_mapped)
1641 continue;
1642 call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
1643 vb->planes[i].dbuf_mapped = 0;
1644 }
1645}
1646
b2f2f047 1647static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
1648{
1649 struct vb2_buffer *vb = NULL;
1650 int ret;
1651
1652 if (b->type != q->type) {
1653 dprintk(1, "dqbuf: invalid buffer type\n");
1654 return -EINVAL;
1655 }
1656 ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
1657 if (ret < 0)
e23ccc0a 1658 return ret;
1659
1660 ret = call_qop(q, buf_finish, vb);
1661 if (ret) {
1662 dprintk(1, "dqbuf: buffer finish failed\n");
1663 return ret;
1664 }
1665
1666 switch (vb->state) {
1667 case VB2_BUF_STATE_DONE:
1668 dprintk(3, "dqbuf: Returning done buffer\n");
1669 break;
1670 case VB2_BUF_STATE_ERROR:
1671 dprintk(3, "dqbuf: Returning done buffer with errors\n");
1672 break;
1673 default:
1674 dprintk(1, "dqbuf: Invalid buffer state\n");
1675 return -EINVAL;
1676 }
1677
1678 /* Fill buffer information for the userspace */
1679 __fill_v4l2_buffer(vb, b);
1680 /* Remove from videobuf queue */
1681 list_del(&vb->queued_entry);
1682 /* go back to dequeued state */
1683 __vb2_dqbuf(vb);
1684
1685 dprintk(1, "dqbuf of buffer %d, with state %d\n",
1686 vb->v4l2_buf.index, vb->state);
1687
1688 return 0;
1689}
1690
1691/**
1692 * vb2_dqbuf() - Dequeue a buffer to the userspace
1693 * @q: videobuf2 queue
1694 * @b: buffer structure passed from userspace to vidioc_dqbuf handler
1695 * in driver
1696 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
1697 * buffers ready for dequeuing are present. Normally the driver
1698 * would be passing (file->f_flags & O_NONBLOCK) here
1699 *
1700 * Should be called from vidioc_dqbuf ioctl handler of a driver.
1701 * This function:
1702 * 1) verifies the passed buffer,
1703 * 2) calls buf_finish callback in the driver (if provided), in which
1704 * driver can perform any additional operations that may be required before
1705 * returning the buffer to userspace, such as cache sync,
1706 * 3) the buffer struct members are filled with relevant information for
1707 * the userspace.
1708 *
1709 * The return values from this function are intended to be directly returned
1710 * from vidioc_dqbuf handler in driver.
1711 */
1712int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
1713{
1714 if (q->fileio) {
1715 dprintk(1, "dqbuf: file io in progress\n");
1716 return -EBUSY;
1717 }
1718 return vb2_internal_dqbuf(q, b, nonblocking);
1719}
1720EXPORT_SYMBOL_GPL(vb2_dqbuf);
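/*
 * Usage sketch (illustrative): the nonblocking flag normally comes straight
 * from the file's O_NONBLOCK flag. Hypothetical handler:
 *
 * static int my_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
 * {
 *	struct my_dev *dev = video_drvdata(file);
 *
 *	return vb2_dqbuf(&dev->vb_queue, b, file->f_flags & O_NONBLOCK);
 * }
 */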
1721
1722/**
1723 * __vb2_queue_cancel() - cancel and stop (pause) streaming
1724 *
1725 * Removes all queued buffers from driver's queue and all buffers queued by
1726 * userspace from videobuf's queue. Returns to state after reqbufs.
1727 */
1728static void __vb2_queue_cancel(struct vb2_queue *q)
1729{
1730 unsigned int i;
1731
1732 if (q->retry_start_streaming) {
1733 q->retry_start_streaming = 0;
1734 q->streaming = 0;
1735 }
1736
1737 /*
1738 * Tell driver to stop all transactions and release all queued
1739 * buffers.
1740 */
1741 if (q->streaming)
1742 call_qop(q, stop_streaming, q);
1743 q->streaming = 0;
1744
1745 /*
1746 * Remove all buffers from videobuf's list...
1747 */
1748 INIT_LIST_HEAD(&q->queued_list);
1749 /*
1750 * ...and done list; userspace will not receive any buffers it
1751 * has not already dequeued before initiating cancel.
1752 */
1753 INIT_LIST_HEAD(&q->done_list);
1754 atomic_set(&q->queued_count, 0);
1755 wake_up_all(&q->done_wq);
1756
1757 /*
1758 * Reinitialize all buffers for next use.
1759 */
1760 for (i = 0; i < q->num_buffers; ++i)
c5384048 1761 __vb2_dqbuf(q->bufs[i]);
bd323e28
MS
1762}
1763
b2f2f047 1764static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
e23ccc0a
PO
1765{
1766 struct vb2_buffer *vb;
5db2c3ba 1767 int ret;
e23ccc0a
PO
1768
1769 if (type != q->type) {
1770 dprintk(1, "streamon: invalid stream type\n");
1771 return -EINVAL;
1772 }
1773
1774 if (q->streaming) {
f956035c
RR
1775 dprintk(3, "streamon successful: already streaming\n");
1776 return 0;
e23ccc0a
PO
1777 }
1778
1779 /*
bd323e28
MS
1780 * If any buffers were queued before streamon,
1781 * we can now pass them to driver for processing.
e23ccc0a 1782 */
bd323e28
MS
1783 list_for_each_entry(vb, &q->queued_list, queued_entry)
1784 __enqueue_in_driver(vb);
e23ccc0a 1785
02f142ec
HV
1786 /* Tell driver to start streaming. */
1787 ret = vb2_start_streaming(q);
5db2c3ba 1788 if (ret) {
bd323e28 1789 __vb2_queue_cancel(q);
5db2c3ba
PO
1790 return ret;
1791 }
1792
1793 q->streaming = 1;
e23ccc0a 1794
e23ccc0a
PO
1795 dprintk(3, "Streamon successful\n");
1796 return 0;
1797}
e23ccc0a
PO
1798
1799/**
b2f2f047 1800 * vb2_streamon - start streaming
e23ccc0a 1801 * @q: videobuf2 queue
b2f2f047 1802 * @type: type argument passed from userspace to vidioc_streamon handler
e23ccc0a 1803 *
b2f2f047 1804 * Should be called from vidioc_streamon handler of a driver.
e23ccc0a 1805 * This function:
b2f2f047
HV
1806 * 1) verifies current state
1807 * 2) passes any previously queued buffers to the driver and starts streaming
e23ccc0a 1808 *
e23ccc0a 1809 * The return values from this function are intended to be directly returned
b2f2f047 1810 * from vidioc_streamon handler in the driver.
e23ccc0a 1811 */
b2f2f047 1812int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
e23ccc0a 1813{
b25748fe 1814 if (q->fileio) {
b2f2f047 1815 dprintk(1, "streamon: file io in progress\n");
b25748fe
MS
1816 return -EBUSY;
1817 }
b2f2f047
HV
1818 return vb2_internal_streamon(q, type);
1819}
1820EXPORT_SYMBOL_GPL(vb2_streamon);
b25748fe 1821
b2f2f047
HV
1822static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
1823{
e23ccc0a
PO
1824 if (type != q->type) {
1825 dprintk(1, "streamoff: invalid stream type\n");
1826 return -EINVAL;
1827 }
1828
1829 if (!q->streaming) {
f956035c
RR
1830 dprintk(3, "streamoff successful: not streaming\n");
1831 return 0;
e23ccc0a
PO
1832 }
1833
1834 /*
1835 * Cancel will pause streaming and remove all buffers from the driver
1836 * and videobuf, effectively returning control over them to userspace.
1837 */
1838 __vb2_queue_cancel(q);
1839
1840 dprintk(3, "Streamoff successful\n");
1841 return 0;
1842}
b2f2f047
HV
1843
1844/**
1845 * vb2_streamoff - stop streaming
1846 * @q: videobuf2 queue
1847 * @type: type argument passed from userspace to vidioc_streamoff handler
1848 *
1849 * Should be called from vidioc_streamoff handler of a driver.
1850 * This function:
1851 * 1) verifies current state,
1852 * 2) stops streaming and dequeues any queued buffers, including those previously
1853 * passed to the driver (after waiting for the driver to finish).
1854 *
1855 * This call can be used for pausing playback.
1856 * The return values from this function are intended to be directly returned
1857 * from vidioc_streamoff handler in the driver.
1858 */
1859int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
1860{
1861 if (q->fileio) {
1862 dprintk(1, "streamoff: file io in progress\n");
1863 return -EBUSY;
1864 }
1865 return vb2_internal_streamoff(q, type);
1866}
e23ccc0a
PO
1867EXPORT_SYMBOL_GPL(vb2_streamoff);
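/*
 * Illustrative sketch (not part of this framework): typical vidioc_streamon
 * and vidioc_streamoff handlers are thin wrappers around vb2_streamon() and
 * vb2_streamoff(). "struct my_dev" is a hypothetical driver structure used
 * only for the example.
 *
 *	static int my_vidioc_streamon(struct file *file, void *priv,
 *				      enum v4l2_buf_type type)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_streamon(&dev->queue, type);
 *	}
 *
 *	static int my_vidioc_streamoff(struct file *file, void *priv,
 *				       enum v4l2_buf_type type)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_streamoff(&dev->queue, type);
 *	}
 */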
1868
1869/**
1870 * __find_plane_by_offset() - find plane associated with the given offset off
1871 */
1872static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
1873 unsigned int *_buffer, unsigned int *_plane)
1874{
1875 struct vb2_buffer *vb;
1876 unsigned int buffer, plane;
1877
1878 /*
1879 * Go over all buffers and their planes, comparing the given offset
1880 * with an offset assigned to each plane. If a match is found,
1881 * return its buffer and plane numbers.
1882 */
1883 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
1884 vb = q->bufs[buffer];
1885
1886 for (plane = 0; plane < vb->num_planes; ++plane) {
1887 if (vb->v4l2_planes[plane].m.mem_offset == off) {
1888 *_buffer = buffer;
1889 *_plane = plane;
1890 return 0;
1891 }
1892 }
1893 }
1894
1895 return -EINVAL;
1896}
1897
83ae7c5a
TS
1898/**
1899 * vb2_expbuf() - Export a buffer as a file descriptor
1900 * @q: videobuf2 queue
1901 * @eb: export buffer structure passed from userspace to vidioc_expbuf
1902 * handler in driver
1903 *
1904 * The return values from this function are intended to be directly returned
1905 * from vidioc_expbuf handler in driver.
1906 */
1907int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
1908{
1909 struct vb2_buffer *vb = NULL;
1910 struct vb2_plane *vb_plane;
1911 int ret;
1912 struct dma_buf *dbuf;
1913
1914 if (q->memory != V4L2_MEMORY_MMAP) {
1915 dprintk(1, "Queue is not currently set up for mmap\n");
1916 return -EINVAL;
1917 }
1918
1919 if (!q->mem_ops->get_dmabuf) {
1920 dprintk(1, "Queue does not support DMA buffer exporting\n");
1921 return -EINVAL;
1922 }
1923
ea3aba84
PZ
1924 if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
1925 dprintk(1, "Queue supports only O_CLOEXEC and access mode flags\n");
83ae7c5a
TS
1926 return -EINVAL;
1927 }
1928
1929 if (eb->type != q->type) {
1930 dprintk(1, "expbuf: invalid buffer type\n");
1931 return -EINVAL;
1932 }
1933
1934 if (eb->index >= q->num_buffers) {
1935 dprintk(1, "buffer index out of range\n");
1936 return -EINVAL;
1937 }
1938
1939 vb = q->bufs[eb->index];
1940
1941 if (eb->plane >= vb->num_planes) {
1942 dprintk(1, "buffer plane out of range\n");
1943 return -EINVAL;
1944 }
1945
1946 vb_plane = &vb->planes[eb->plane];
1947
ea3aba84 1948 dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
83ae7c5a
TS
1949 if (IS_ERR_OR_NULL(dbuf)) {
1950 dprintk(1, "Failed to export buffer %d, plane %d\n",
1951 eb->index, eb->plane);
1952 return -EINVAL;
1953 }
1954
ea3aba84 1955 ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
83ae7c5a
TS
1956 if (ret < 0) {
1957 dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
1958 eb->index, eb->plane, ret);
1959 dma_buf_put(dbuf);
1960 return ret;
1961 }
1962
1963 dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
1964 eb->index, eb->plane, ret);
1965 eb->fd = ret;
1966
1967 return 0;
1968}
1969EXPORT_SYMBOL_GPL(vb2_expbuf);
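/*
 * Illustrative sketch of the userspace side (assumptions: a single-plane
 * capture queue on an already opened device fd; error handling omitted):
 *
 *	struct v4l2_exportbuffer expbuf;
 *
 *	memset(&expbuf, 0, sizeof(expbuf));
 *	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	expbuf.index = 0;
 *	expbuf.plane = 0;
 *	expbuf.flags = O_CLOEXEC | O_RDWR;
 *	if (ioctl(fd, VIDIOC_EXPBUF, &expbuf) == 0)
 *		use_dmabuf(expbuf.fd);
 *
 * where use_dmabuf() stands for whatever consumer (e.g. a GPU or DRM API)
 * the exported DMABUF file descriptor is handed to.
 */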
1970
e23ccc0a
PO
1971/**
1972 * vb2_mmap() - map video buffers into application address space
1973 * @q: videobuf2 queue
1974 * @vma: vma passed to the mmap file operation handler in the driver
1975 *
1976 * Should be called from mmap file operation handler of a driver.
1977 * This function maps one plane of one of the available video buffers to
1978 * userspace. To map the whole video memory allocated on reqbufs, this function
1979 * has to be called once per plane of each previously allocated buffer.
1980 *
1981 * When the userspace application calls mmap, it passes the offset returned
1982 * to it earlier by the vidioc_querybuf handler. That offset acts as
1983 * a "cookie", which is then used to identify the plane to be mapped.
1984 * This function finds a plane with a matching offset and performs the mapping
1985 * by means of the provided memory operation.
1986 *
1987 * The return values from this function are intended to be directly returned
1988 * from the mmap handler in driver.
1989 */
1990int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1991{
1992 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
e23ccc0a
PO
1993 struct vb2_buffer *vb;
1994 unsigned int buffer, plane;
1995 int ret;
7f841459 1996 unsigned long length;
e23ccc0a
PO
1997
1998 if (q->memory != V4L2_MEMORY_MMAP) {
1999 dprintk(1, "Queue is not currently set up for mmap\n");
2000 return -EINVAL;
2001 }
2002
2003 /*
2004 * Check memory area access mode.
2005 */
2006 if (!(vma->vm_flags & VM_SHARED)) {
2007 dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
2008 return -EINVAL;
2009 }
2010 if (V4L2_TYPE_IS_OUTPUT(q->type)) {
2011 if (!(vma->vm_flags & VM_WRITE)) {
2012 dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
2013 return -EINVAL;
2014 }
2015 } else {
2016 if (!(vma->vm_flags & VM_READ)) {
2017 dprintk(1, "Invalid vma flags, VM_READ needed\n");
2018 return -EINVAL;
2019 }
2020 }
2021
2022 /*
2023 * Find the plane corresponding to the offset passed by userspace.
2024 */
2025 ret = __find_plane_by_offset(q, off, &buffer, &plane);
2026 if (ret)
2027 return ret;
2028
2029 vb = q->bufs[buffer];
e23ccc0a 2030
7f841459
MCC
2031 /*
2032 * MMAP requires page_aligned buffers.
2033 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
2034 * so, we need to do the same here.
2035 */
2036 length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
2037 if (length < (vma->vm_end - vma->vm_start)) {
2038 dprintk(1,
2039 "MMAP invalid, as it would overflow buffer length\n");
068a0df7
SWK
2040 return -EINVAL;
2041 }
2042
a00d0266 2043 ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma);
e23ccc0a
PO
2044 if (ret)
2045 return ret;
2046
e23ccc0a
PO
2047 dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
2048 return 0;
2049}
2050EXPORT_SYMBOL_GPL(vb2_mmap);
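/*
 * Illustrative sketch of the userspace side of the "cookie" scheme described
 * above (assumptions: single-plane V4L2_BUF_TYPE_VIDEO_CAPTURE queue whose
 * buffers were already requested with VIDIOC_REQBUFS; error handling omitted):
 *
 *	struct v4l2_buffer buf;
 *	void *start;
 *
 *	memset(&buf, 0, sizeof(buf));
 *	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	buf.memory = V4L2_MEMORY_MMAP;
 *	buf.index = 0;
 *	ioctl(fd, VIDIOC_QUERYBUF, &buf);
 *	start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		     fd, buf.m.offset);
 */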
2051
6f524ec1
SJ
2052#ifndef CONFIG_MMU
2053unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
2054 unsigned long addr,
2055 unsigned long len,
2056 unsigned long pgoff,
2057 unsigned long flags)
2058{
2059 unsigned long off = pgoff << PAGE_SHIFT;
2060 struct vb2_buffer *vb;
2061 unsigned int buffer, plane;
2062 int ret;
2063
2064 if (q->memory != V4L2_MEMORY_MMAP) {
2065 dprintk(1, "Queue is not currently set up for mmap\n");
2066 return -EINVAL;
2067 }
2068
2069 /*
2070 * Find the plane corresponding to the offset passed by userspace.
2071 */
2072 ret = __find_plane_by_offset(q, off, &buffer, &plane);
2073 if (ret)
2074 return ret;
2075
2076 vb = q->bufs[buffer];
2077
2078 return (unsigned long)vb2_plane_vaddr(vb, plane);
2079}
2080EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
2081#endif
2082
b25748fe
MS
2083static int __vb2_init_fileio(struct vb2_queue *q, int read);
2084static int __vb2_cleanup_fileio(struct vb2_queue *q);
e23ccc0a
PO
2085
2086/**
2087 * vb2_poll() - implements poll userspace operation
2088 * @q: videobuf2 queue
2089 * @file: file argument passed to the poll file operation handler
2090 * @wait: wait argument passed to the poll file operation handler
2091 *
2092 * This function implements poll file operation handler for a driver.
2093 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
2094 * be informed that the file descriptor of a video device is available for
2095 * reading.
2096 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
2097 * will be reported as available for writing.
2098 *
95213ceb
HV
2099 * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
2100 * pending events.
2101 *
e23ccc0a
PO
2102 * The return values from this function are intended to be directly returned
2103 * from poll handler in driver.
2104 */
2105unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
2106{
95213ceb 2107 struct video_device *vfd = video_devdata(file);
bf5c7cbb 2108 unsigned long req_events = poll_requested_events(wait);
e23ccc0a 2109 struct vb2_buffer *vb = NULL;
95213ceb
HV
2110 unsigned int res = 0;
2111 unsigned long flags;
2112
2113 if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
2114 struct v4l2_fh *fh = file->private_data;
2115
2116 if (v4l2_event_pending(fh))
2117 res = POLLPRI;
2118 else if (req_events & POLLPRI)
2119 poll_wait(file, &fh->wait, wait);
2120 }
e23ccc0a 2121
cd13823f
HV
2122 if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
2123 return res;
2124 if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
2125 return res;
2126
b25748fe 2127 /*
4ffabdb3 2128 * Start file I/O emulator only if streaming API has not been used yet.
b25748fe
MS
2129 */
2130 if (q->num_buffers == 0 && q->fileio == NULL) {
bf5c7cbb
HV
2131 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
2132 (req_events & (POLLIN | POLLRDNORM))) {
95213ceb
HV
2133 if (__vb2_init_fileio(q, 1))
2134 return res | POLLERR;
b25748fe 2135 }
bf5c7cbb
HV
2136 if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
2137 (req_events & (POLLOUT | POLLWRNORM))) {
95213ceb
HV
2138 if (__vb2_init_fileio(q, 0))
2139 return res | POLLERR;
b25748fe
MS
2140 /*
2141 * Write to OUTPUT queue can be done immediately.
2142 */
95213ceb 2143 return res | POLLOUT | POLLWRNORM;
b25748fe
MS
2144 }
2145 }
2146
e23ccc0a
PO
2147 /*
2148 * There is nothing to wait for if no buffers have already been queued.
2149 */
2150 if (list_empty(&q->queued_list))
95213ceb 2151 return res | POLLERR;
e23ccc0a 2152
412cb87d
SWK
2153 if (list_empty(&q->done_list))
2154 poll_wait(file, &q->done_wq, wait);
e23ccc0a
PO
2155
2156 /*
2157 * Take first buffer available for dequeuing.
2158 */
2159 spin_lock_irqsave(&q->done_lock, flags);
2160 if (!list_empty(&q->done_list))
2161 vb = list_first_entry(&q->done_list, struct vb2_buffer,
2162 done_entry);
2163 spin_unlock_irqrestore(&q->done_lock, flags);
2164
2165 if (vb && (vb->state == VB2_BUF_STATE_DONE
2166 || vb->state == VB2_BUF_STATE_ERROR)) {
95213ceb
HV
2167 return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
2168 res | POLLOUT | POLLWRNORM :
2169 res | POLLIN | POLLRDNORM;
e23ccc0a 2170 }
95213ceb 2171 return res;
e23ccc0a
PO
2172}
2173EXPORT_SYMBOL_GPL(vb2_poll);
2174
2175/**
2176 * vb2_queue_init() - initialize a videobuf2 queue
2177 * @q: videobuf2 queue; this structure should be allocated in driver
2178 *
2179 * The vb2_queue structure should be allocated by the driver. The driver is
2180 * responsible for clearing its content and setting initial values for some
2181 * required entries before calling this function.
2182 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
2183 * to the struct vb2_queue description in include/media/videobuf2-core.h
2184 * for more information.
2185 */
2186int vb2_queue_init(struct vb2_queue *q)
2187{
896f38f5
EG
2188 /*
2189 * Sanity check
2190 */
2191 if (WARN_ON(!q) ||
2192 WARN_ON(!q->ops) ||
2193 WARN_ON(!q->mem_ops) ||
2194 WARN_ON(!q->type) ||
2195 WARN_ON(!q->io_modes) ||
2196 WARN_ON(!q->ops->queue_setup) ||
6aa69f99
KD
2197 WARN_ON(!q->ops->buf_queue) ||
2198 WARN_ON(q->timestamp_type & ~V4L2_BUF_FLAG_TIMESTAMP_MASK))
896f38f5 2199 return -EINVAL;
e23ccc0a 2200
6aa69f99
KD
2201 /* Warn that the driver should choose an appropriate timestamp type */
2202 WARN_ON(q->timestamp_type == V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
2203
e23ccc0a
PO
2204 INIT_LIST_HEAD(&q->queued_list);
2205 INIT_LIST_HEAD(&q->done_list);
2206 spin_lock_init(&q->done_lock);
2207 init_waitqueue_head(&q->done_wq);
2208
2209 if (q->buf_struct_size == 0)
2210 q->buf_struct_size = sizeof(struct vb2_buffer);
2211
2212 return 0;
2213}
2214EXPORT_SYMBOL_GPL(vb2_queue_init);
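/*
 * Illustrative sketch (not part of this framework): minimal queue setup in a
 * driver's probe or open path. "struct my_dev", "struct my_buffer" and
 * "my_qops" are hypothetical driver-side names; the vmalloc allocator is only
 * one possible choice of mem_ops.
 *
 *	static int my_init_vb2_queue(struct my_dev *dev)
 *	{
 *		struct vb2_queue *q = &dev->queue;
 *
 *		memset(q, 0, sizeof(*q));
 *		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
 *		q->drv_priv = dev;
 *		q->buf_struct_size = sizeof(struct my_buffer);
 *		q->ops = &my_qops;
 *		q->mem_ops = &vb2_vmalloc_memops;
 *		q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *		q->lock = &dev->mutex;
 *
 *		return vb2_queue_init(q);
 *	}
 */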
2215
2216/**
2217 * vb2_queue_release() - stop streaming, release the queue and free memory
2218 * @q: videobuf2 queue
2219 *
2220 * This function stops streaming and performs the necessary cleanup, including
2221 * freeing video buffer memory. The driver is responsible for freeing
2222 * the vb2_queue structure itself.
2223 */
2224void vb2_queue_release(struct vb2_queue *q)
2225{
b25748fe 2226 __vb2_cleanup_fileio(q);
e23ccc0a 2227 __vb2_queue_cancel(q);
2d86401c 2228 __vb2_queue_free(q, q->num_buffers);
e23ccc0a
PO
2229}
2230EXPORT_SYMBOL_GPL(vb2_queue_release);
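/*
 * Illustrative sketch (not part of this framework): a driver that does not use
 * the vb2_fop_release() helper defined later in this file would typically call
 * vb2_queue_release() from its own release file operation. "struct my_dev" is
 * a hypothetical driver structure.
 *
 *	static int my_release(struct file *file)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		vb2_queue_release(&dev->queue);
 *		return v4l2_fh_release(file);
 *	}
 */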
2231
b25748fe
MS
2232/**
2233 * struct vb2_fileio_buf - buffer context used by file io emulator
2234 *
2235 * vb2 provides a compatibility layer and emulator of file io (read and
2236 * write) calls on top of the streaming API. This structure is used for
2237 * tracking context related to the buffers.
2238 */
2239struct vb2_fileio_buf {
2240 void *vaddr;
2241 unsigned int size;
2242 unsigned int pos;
2243 unsigned int queued:1;
2244};
2245
2246/**
2247 * struct vb2_fileio_data - queue context used by file io emulator
2248 *
2249 * vb2 provides a compatibility layer and emulator of file io (read and
2250 * write) calls on top of the streaming API. For proper operation it requires
2251 * this structure to save the driver state between each call of the read
2252 * or write function.
2253 */
2254struct vb2_fileio_data {
2255 struct v4l2_requestbuffers req;
2256 struct v4l2_buffer b;
2257 struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
2258 unsigned int index;
2259 unsigned int q_count;
2260 unsigned int dq_count;
2261 unsigned int flags;
2262};
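/*
 * Illustrative sketch of what the emulator enables on the userspace side
 * (assumptions: a capture device that supports VB2_READ; frame size chosen
 * arbitrarily; error handling omitted):
 *
 *	char frame[640 * 480 * 2];
 *	int fd = open("/dev/video0", O_RDONLY);
 *	ssize_t n = read(fd, frame, sizeof(frame));
 *
 * The first read() triggers __vb2_init_fileio() below, which requests MMAP
 * buffers, queues them and starts streaming on the application's behalf.
 */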
2263
2264/**
2265 * __vb2_init_fileio() - initialize file io emulator
2266 * @q: videobuf2 queue
2267 * @read: mode selector (1 means read, 0 means write)
2268 */
2269static int __vb2_init_fileio(struct vb2_queue *q, int read)
2270{
2271 struct vb2_fileio_data *fileio;
2272 int i, ret;
2273 unsigned int count = 0;
2274
2275 /*
2276 * Sanity check
2277 */
2278 if ((read && !(q->io_modes & VB2_READ)) ||
2279 (!read && !(q->io_modes & VB2_WRITE)))
2280 BUG();
2281
2282 /*
2283 * Check if device supports mapping buffers to kernel virtual space.
2284 */
2285 if (!q->mem_ops->vaddr)
2286 return -EBUSY;
2287
2288 /*
2289 * Check if streaming api has not been already activated.
2290 */
2291 if (q->streaming || q->num_buffers > 0)
2292 return -EBUSY;
2293
2294 /*
2295 * Start with count 1, driver can increase it in queue_setup()
2296 */
2297 count = 1;
2298
2299 dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
2300 (read) ? "read" : "write", count, q->io_flags);
2301
2302 fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
2303 if (fileio == NULL)
2304 return -ENOMEM;
2305
2306 fileio->flags = q->io_flags;
2307
2308 /*
2309 * Request buffers and use MMAP type to force driver
2310 * to allocate buffers by itself.
2311 */
2312 fileio->req.count = count;
2313 fileio->req.memory = V4L2_MEMORY_MMAP;
2314 fileio->req.type = q->type;
2315 ret = vb2_reqbufs(q, &fileio->req);
2316 if (ret)
2317 goto err_kfree;
2318
2319 /*
2320 * Check if plane_count is correct
2321 * (multiplane buffers are not supported).
2322 */
2323 if (q->bufs[0]->num_planes != 1) {
b25748fe
MS
2324 ret = -EBUSY;
2325 goto err_reqbufs;
2326 }
2327
2328 /*
2329 * Get kernel address of each buffer.
2330 */
2331 for (i = 0; i < q->num_buffers; i++) {
2332 fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
5dd6946c
WY
2333 if (fileio->bufs[i].vaddr == NULL) {
2334 ret = -EINVAL;
b25748fe 2335 goto err_reqbufs;
5dd6946c 2336 }
b25748fe
MS
2337 fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
2338 }
2339
2340 /*
2341 * Read mode requires pre queuing of all buffers.
2342 */
2343 if (read) {
2344 /*
2345 * Queue all buffers.
2346 */
2347 for (i = 0; i < q->num_buffers; i++) {
2348 struct v4l2_buffer *b = &fileio->b;
2349 memset(b, 0, sizeof(*b));
2350 b->type = q->type;
2351 b->memory = q->memory;
2352 b->index = i;
2353 ret = vb2_qbuf(q, b);
2354 if (ret)
2355 goto err_reqbufs;
2356 fileio->bufs[i].queued = 1;
2357 }
88e26870 2358 fileio->index = q->num_buffers;
b25748fe
MS
2359 }
2360
02f142ec
HV
2361 /*
2362 * Start streaming.
2363 */
2364 ret = vb2_streamon(q, q->type);
2365 if (ret)
2366 goto err_reqbufs;
2367
b25748fe
MS
2368 q->fileio = fileio;
2369
2370 return ret;
2371
2372err_reqbufs:
a67e1722 2373 fileio->req.count = 0;
b25748fe
MS
2374 vb2_reqbufs(q, &fileio->req);
2375
2376err_kfree:
2377 kfree(fileio);
2378 return ret;
2379}
2380
2381/**
2382 * __vb2_cleanup_fileio() - free resources used by the file io emulator
2383 * @q: videobuf2 queue
2384 */
2385static int __vb2_cleanup_fileio(struct vb2_queue *q)
2386{
2387 struct vb2_fileio_data *fileio = q->fileio;
2388
2389 if (fileio) {
b2f2f047 2390 vb2_internal_streamoff(q, q->type);
b25748fe 2391 q->fileio = NULL;
b25748fe
MS
2392 fileio->req.count = 0;
2393 vb2_reqbufs(q, &fileio->req);
2394 kfree(fileio);
2395 dprintk(3, "file io emulator closed\n");
2396 }
2397 return 0;
2398}
2399
2400/**
2401 * __vb2_perform_fileio() - perform a single file io (read or write) operation
2402 * @q: videobuf2 queue
2403 * @data: pointer to the target userspace buffer
2404 * @count: number of bytes to read or write
2405 * @ppos: file handle position tracking pointer
2406 * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
2407 * @read: access mode selector (1 means read, 0 means write)
2408 */
2409static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
2410 loff_t *ppos, int nonblock, int read)
2411{
2412 struct vb2_fileio_data *fileio;
2413 struct vb2_fileio_buf *buf;
2414 int ret, index;
2415
08b99e26 2416 dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
b25748fe
MS
2417 read ? "read" : "write", (long)*ppos, count,
2418 nonblock ? "non" : "");
2419
2420 if (!data)
2421 return -EINVAL;
2422
2423 /*
2424 * Initialize emulator on first call.
2425 */
2426 if (!q->fileio) {
2427 ret = __vb2_init_fileio(q, read);
2428 dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
2429 if (ret)
2430 return ret;
2431 }
2432 fileio = q->fileio;
2433
b25748fe
MS
2434 /*
2435 * Check if we need to dequeue the buffer.
2436 */
88e26870
HV
2437 index = fileio->index;
2438 if (index >= q->num_buffers) {
b25748fe
MS
2439 /*
2440 * Call vb2_dqbuf to get buffer back.
2441 */
2442 memset(&fileio->b, 0, sizeof(fileio->b));
2443 fileio->b.type = q->type;
2444 fileio->b.memory = q->memory;
b2f2f047 2445 ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
b25748fe
MS
2446 dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
2447 if (ret)
b2f2f047 2448 return ret;
b25748fe
MS
2449 fileio->dq_count += 1;
2450
88e26870
HV
2451 index = fileio->b.index;
2452 buf = &fileio->bufs[index];
2453
b25748fe
MS
2454 /*
2455 * Get number of bytes filled by the driver
2456 */
88e26870 2457 buf->pos = 0;
b25748fe 2458 buf->queued = 0;
88e26870
HV
2459 buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
2460 : vb2_plane_size(q->bufs[index], 0);
2461 } else {
2462 buf = &fileio->bufs[index];
b25748fe
MS
2463 }
2464
2465 /*
2466 * Limit count on last few bytes of the buffer.
2467 */
2468 if (buf->pos + count > buf->size) {
2469 count = buf->size - buf->pos;
08b99e26 2470 dprintk(5, "reducing read count: %zd\n", count);
b25748fe
MS
2471 }
2472
2473 /*
2474 * Transfer data to userspace.
2475 */
08b99e26 2476 dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
b25748fe
MS
2477 count, index, buf->pos);
2478 if (read)
2479 ret = copy_to_user(data, buf->vaddr + buf->pos, count);
2480 else
2481 ret = copy_from_user(buf->vaddr + buf->pos, data, count);
2482 if (ret) {
2483 dprintk(3, "file io: error copying data\n");
b2f2f047 2484 return -EFAULT;
b25748fe
MS
2485 }
2486
2487 /*
2488 * Update counters.
2489 */
2490 buf->pos += count;
2491 *ppos += count;
2492
2493 /*
2494 * Queue next buffer if required.
2495 */
2496 if (buf->pos == buf->size ||
2497 (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
2498 /*
2499 * Check if this is the last buffer to read.
2500 */
2501 if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
2502 fileio->dq_count == 1) {
2503 dprintk(3, "file io: read limit reached\n");
b25748fe
MS
2504 return __vb2_cleanup_fileio(q);
2505 }
2506
2507 /*
2508 * Call vb2_qbuf and give buffer to the driver.
2509 */
2510 memset(&fileio->b, 0, sizeof(fileio->b));
2511 fileio->b.type = q->type;
2512 fileio->b.memory = q->memory;
2513 fileio->b.index = index;
2514 fileio->b.bytesused = buf->pos;
b2f2f047 2515 ret = vb2_internal_qbuf(q, &fileio->b);
b25748fe
MS
2516 dprintk(5, "file io: vb2_qbuf result: %d\n", ret);
2517 if (ret)
b2f2f047 2518 return ret;
b25748fe
MS
2519
2520 /*
2521 * Buffer has been queued, update the status
2522 */
2523 buf->pos = 0;
2524 buf->queued = 1;
88e26870 2525 buf->size = vb2_plane_size(q->bufs[index], 0);
b25748fe 2526 fileio->q_count += 1;
88e26870
HV
2527 if (fileio->index < q->num_buffers)
2528 fileio->index++;
b25748fe
MS
2529 }
2530
2531 /*
2532 * Return proper number of bytes processed.
2533 */
2534 if (ret == 0)
2535 ret = count;
b25748fe
MS
2536 return ret;
2537}
2538
2539size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
2540 loff_t *ppos, int nonblocking)
2541{
2542 return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
2543}
2544EXPORT_SYMBOL_GPL(vb2_read);
2545
819585bc 2546size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
b25748fe
MS
2547 loff_t *ppos, int nonblocking)
2548{
819585bc
RR
2549 return __vb2_perform_fileio(q, (char __user *) data, count,
2550 ppos, nonblocking, 0);
b25748fe
MS
2551}
2552EXPORT_SYMBOL_GPL(vb2_write);
2553
4c1ffcaa
HV
2554
2555/*
2556 * The following functions are not part of the vb2 core API, but are helper
2557 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
2558 * and struct vb2_ops.
2559 * They contain boilerplate code that most if not all drivers would otherwise
2560 * have to write themselves, and so they simplify the driver code.
2561 */
2562
2563/* The queue is busy if there is an owner and you are not that owner. */
2564static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
2565{
2566 return vdev->queue->owner && vdev->queue->owner != file->private_data;
2567}
2568
2569/* vb2 ioctl helpers */
2570
2571int vb2_ioctl_reqbufs(struct file *file, void *priv,
2572 struct v4l2_requestbuffers *p)
2573{
2574 struct video_device *vdev = video_devdata(file);
2575 int res = __verify_memory_type(vdev->queue, p->memory, p->type);
2576
2577 if (res)
2578 return res;
2579 if (vb2_queue_is_busy(vdev, file))
2580 return -EBUSY;
2581 res = __reqbufs(vdev->queue, p);
2582 /* If count == 0, then the owner has released all buffers and is
2583 no longer the owner of the queue. Otherwise we have a new owner. */
2584 if (res == 0)
2585 vdev->queue->owner = p->count ? file->private_data : NULL;
2586 return res;
2587}
2588EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
2589
2590int vb2_ioctl_create_bufs(struct file *file, void *priv,
2591 struct v4l2_create_buffers *p)
2592{
2593 struct video_device *vdev = video_devdata(file);
2594 int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
2595
2596 p->index = vdev->queue->num_buffers;
2597 /* If count == 0, then just check if memory and type are valid.
2598 Any -EBUSY result from __verify_memory_type can be mapped to 0. */
2599 if (p->count == 0)
2600 return res != -EBUSY ? res : 0;
2601 if (res)
2602 return res;
2603 if (vb2_queue_is_busy(vdev, file))
2604 return -EBUSY;
2605 res = __create_bufs(vdev->queue, p);
2606 if (res == 0)
2607 vdev->queue->owner = file->private_data;
2608 return res;
2609}
2610EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
2611
2612int vb2_ioctl_prepare_buf(struct file *file, void *priv,
2613 struct v4l2_buffer *p)
2614{
2615 struct video_device *vdev = video_devdata(file);
2616
2617 if (vb2_queue_is_busy(vdev, file))
2618 return -EBUSY;
2619 return vb2_prepare_buf(vdev->queue, p);
2620}
2621EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
2622
2623int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
2624{
2625 struct video_device *vdev = video_devdata(file);
2626
2627 /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
2628 return vb2_querybuf(vdev->queue, p);
2629}
2630EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
2631
2632int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
2633{
2634 struct video_device *vdev = video_devdata(file);
2635
2636 if (vb2_queue_is_busy(vdev, file))
2637 return -EBUSY;
2638 return vb2_qbuf(vdev->queue, p);
2639}
2640EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
2641
2642int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
2643{
2644 struct video_device *vdev = video_devdata(file);
2645
2646 if (vb2_queue_is_busy(vdev, file))
2647 return -EBUSY;
2648 return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
2649}
2650EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
2651
2652int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
2653{
2654 struct video_device *vdev = video_devdata(file);
2655
2656 if (vb2_queue_is_busy(vdev, file))
2657 return -EBUSY;
2658 return vb2_streamon(vdev->queue, i);
2659}
2660EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
2661
2662int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
2663{
2664 struct video_device *vdev = video_devdata(file);
2665
2666 if (vb2_queue_is_busy(vdev, file))
2667 return -EBUSY;
2668 return vb2_streamoff(vdev->queue, i);
2669}
2670EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
2671
83ae7c5a
TS
2672int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
2673{
2674 struct video_device *vdev = video_devdata(file);
2675
2676 if (vb2_queue_is_busy(vdev, file))
2677 return -EBUSY;
2678 return vb2_expbuf(vdev->queue, p);
2679}
2680EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
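/*
 * Illustrative sketch (not part of this framework): wiring the vb2_ioctl_*
 * helpers above into a driver's ioctl ops table. Only the buffer-related
 * entries are shown; a real driver adds its format, input and control ioctls.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
 *		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
 *		.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
 *		.vidioc_querybuf	= vb2_ioctl_querybuf,
 *		.vidioc_qbuf		= vb2_ioctl_qbuf,
 *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
 *		.vidioc_expbuf		= vb2_ioctl_expbuf,
 *		.vidioc_streamon	= vb2_ioctl_streamon,
 *		.vidioc_streamoff	= vb2_ioctl_streamoff,
 *	};
 */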
2681
4c1ffcaa
HV
2682/* v4l2_file_operations helpers */
2683
2684int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
2685{
2686 struct video_device *vdev = video_devdata(file);
8a90f1a6
LP
2687 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
2688 int err;
4c1ffcaa 2689
8a90f1a6
LP
2690 if (lock && mutex_lock_interruptible(lock))
2691 return -ERESTARTSYS;
2692 err = vb2_mmap(vdev->queue, vma);
2693 if (lock)
2694 mutex_unlock(lock);
2695 return err;
4c1ffcaa
HV
2696}
2697EXPORT_SYMBOL_GPL(vb2_fop_mmap);
2698
1380f575 2699int _vb2_fop_release(struct file *file, struct mutex *lock)
4c1ffcaa
HV
2700{
2701 struct video_device *vdev = video_devdata(file);
2702
2703 if (file->private_data == vdev->queue->owner) {
1380f575
RR
2704 if (lock)
2705 mutex_lock(lock);
4c1ffcaa
HV
2706 vb2_queue_release(vdev->queue);
2707 vdev->queue->owner = NULL;
1380f575
RR
2708 if (lock)
2709 mutex_unlock(lock);
4c1ffcaa
HV
2710 }
2711 return v4l2_fh_release(file);
2712}
1380f575
RR
2713EXPORT_SYMBOL_GPL(_vb2_fop_release);
2714
2715int vb2_fop_release(struct file *file)
2716{
2717 struct video_device *vdev = video_devdata(file);
2718 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
2719
2720 return _vb2_fop_release(file, lock);
2721}
4c1ffcaa
HV
2722EXPORT_SYMBOL_GPL(vb2_fop_release);
2723
819585bc 2724ssize_t vb2_fop_write(struct file *file, const char __user *buf,
4c1ffcaa
HV
2725 size_t count, loff_t *ppos)
2726{
2727 struct video_device *vdev = video_devdata(file);
2728 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
4c1ffcaa
HV
2729 int err = -EBUSY;
2730
cf533735 2731 if (lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
2732 return -ERESTARTSYS;
2733 if (vb2_queue_is_busy(vdev, file))
2734 goto exit;
2735 err = vb2_write(vdev->queue, buf, count, ppos,
2736 file->f_flags & O_NONBLOCK);
8c82c75c 2737 if (vdev->queue->fileio)
4c1ffcaa
HV
2738 vdev->queue->owner = file->private_data;
2739exit:
cf533735 2740 if (lock)
4c1ffcaa
HV
2741 mutex_unlock(lock);
2742 return err;
2743}
2744EXPORT_SYMBOL_GPL(vb2_fop_write);
2745
2746ssize_t vb2_fop_read(struct file *file, char __user *buf,
2747 size_t count, loff_t *ppos)
2748{
2749 struct video_device *vdev = video_devdata(file);
2750 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
4c1ffcaa
HV
2751 int err = -EBUSY;
2752
cf533735 2753 if (lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
2754 return -ERESTARTSYS;
2755 if (vb2_queue_is_busy(vdev, file))
2756 goto exit;
2757 err = vb2_read(vdev->queue, buf, count, ppos,
2758 file->f_flags & O_NONBLOCK);
8c82c75c 2759 if (vdev->queue->fileio)
4c1ffcaa
HV
2760 vdev->queue->owner = file->private_data;
2761exit:
cf533735 2762 if (lock)
4c1ffcaa
HV
2763 mutex_unlock(lock);
2764 return err;
2765}
2766EXPORT_SYMBOL_GPL(vb2_fop_read);
2767
2768unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
2769{
2770 struct video_device *vdev = video_devdata(file);
2771 struct vb2_queue *q = vdev->queue;
2772 struct mutex *lock = q->lock ? q->lock : vdev->lock;
2773 unsigned long req_events = poll_requested_events(wait);
2774 unsigned res;
2775 void *fileio;
4c1ffcaa
HV
2776 bool must_lock = false;
2777
2778 /* Try to be smart: only lock if polling might start fileio,
2779 otherwise locking will only introduce unwanted delays. */
2780 if (q->num_buffers == 0 && q->fileio == NULL) {
2781 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
2782 (req_events & (POLLIN | POLLRDNORM)))
2783 must_lock = true;
2784 else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
2785 (req_events & (POLLOUT | POLLWRNORM)))
2786 must_lock = true;
2787 }
2788
2789 /* If locking is needed, but this helper doesn't know how, then you
2790 shouldn't be using this helper but you should write your own. */
cf533735 2791 WARN_ON(must_lock && !lock);
4c1ffcaa 2792
cf533735 2793 if (must_lock && lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
2794 return POLLERR;
2795
2796 fileio = q->fileio;
2797
2798 res = vb2_poll(vdev->queue, file, wait);
2799
2800 /* If fileio was started, then we have a new queue owner. */
2801 if (must_lock && !fileio && q->fileio)
2802 q->owner = file->private_data;
cf533735 2803 if (must_lock && lock)
4c1ffcaa
HV
2804 mutex_unlock(lock);
2805 return res;
2806}
2807EXPORT_SYMBOL_GPL(vb2_fop_poll);
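/*
 * Illustrative sketch (not part of this framework): a file operations table
 * built from the vb2_fop_* helpers above plus the standard v4l2 open helper.
 * This assumes vdev->queue and the queue/device locks have been set up as
 * described for the individual helpers.
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.mmap		= vb2_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 */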
2808
2809#ifndef CONFIG_MMU
2810unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
2811 unsigned long len, unsigned long pgoff, unsigned long flags)
2812{
2813 struct video_device *vdev = video_devdata(file);
8a90f1a6
LP
2814 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
2815 int ret;
4c1ffcaa 2816
8a90f1a6
LP
2817 if (lock && mutex_lock_interruptible(lock))
2818 return -ERESTARTSYS;
2819 ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
2820 if (lock)
2821 mutex_unlock(lock);
2822 return ret;
4c1ffcaa
HV
2823}
2824EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
2825#endif
2826
2827/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
2828
2829void vb2_ops_wait_prepare(struct vb2_queue *vq)
2830{
2831 mutex_unlock(vq->lock);
2832}
2833EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
2834
2835void vb2_ops_wait_finish(struct vb2_queue *vq)
2836{
2837 mutex_lock(vq->lock);
2838}
2839EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
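/*
 * Illustrative sketch (not part of this framework): using the wait helpers
 * above in a driver's vb2_ops. This is only valid when vq->lock is non-NULL,
 * as noted above; my_queue_setup() and my_buf_queue() are hypothetical
 * driver callbacks.
 *
 *	static const struct vb2_ops my_qops = {
 *		.queue_setup	= my_queue_setup,
 *		.buf_queue	= my_buf_queue,
 *		.wait_prepare	= vb2_ops_wait_prepare,
 *		.wait_finish	= vb2_ops_wait_finish,
 *	};
 */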
2840
e23ccc0a 2841MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
95072084 2842MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
e23ccc0a 2843MODULE_LICENSE("GPL");