[media] vb2: rename queued_count to owned_by_drv_count
drivers/media/v4l2-core/videobuf2-core.c
1/*
2 * videobuf2-core.c - V4L2 driver helper framework
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
6 * Author: Pawel Osciak <pawel@osciak.com>
7 * Marek Szyprowski <m.szyprowski@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation.
12 */
13
14#include <linux/err.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mm.h>
18#include <linux/poll.h>
19#include <linux/slab.h>
20#include <linux/sched.h>
21
22#include <media/v4l2-dev.h>
23#include <media/v4l2-fh.h>
24#include <media/v4l2-event.h>
25#include <media/videobuf2-core.h>
26
27static int debug;
28module_param(debug, int, 0644);
29
30#define dprintk(level, fmt, arg...) \
31 do { \
32 if (debug >= level) \
33 printk(KERN_DEBUG "vb2: " fmt, ## arg); \
34 } while (0)
35
36#ifdef CONFIG_VIDEO_ADV_DEBUG
37
38/*
39 * If advanced debugging is on, then count how often each op is called,
40 * which can either be per-buffer or per-queue.
41 *
42 * If the op failed then the 'fail_' variant is called to decrease the
43 * counter. That makes it easy to check that the 'init' and 'cleanup'
44 * (and variations thereof) stay balanced.
45 */
46
47#define call_memop(vb, op, args...) \
48({ \
49 struct vb2_queue *_q = (vb)->vb2_queue; \
50 dprintk(2, "call_memop(%p, %d, %s)%s\n", \
51 _q, (vb)->v4l2_buf.index, #op, \
52 _q->mem_ops->op ? "" : " (nop)"); \
53 (vb)->cnt_mem_ ## op++; \
54 _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
55})
56#define fail_memop(vb, op) ((vb)->cnt_mem_ ## op--)
57
58#define call_qop(q, op, args...) \
59({ \
60 dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
61 (q)->ops->op ? "" : " (nop)"); \
62 (q)->cnt_ ## op++; \
63 (q)->ops->op ? (q)->ops->op(args) : 0; \
64})
65#define fail_qop(q, op) ((q)->cnt_ ## op--)
66
67#define call_vb_qop(vb, op, args...) \
68({ \
69 struct vb2_queue *_q = (vb)->vb2_queue; \
70 dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
71 _q, (vb)->v4l2_buf.index, #op, \
72 _q->ops->op ? "" : " (nop)"); \
73 (vb)->cnt_ ## op++; \
74 _q->ops->op ? _q->ops->op(args) : 0; \
75})
76#define fail_vb_qop(vb, op) ((vb)->cnt_ ## op--)
77
78#else
79
80#define call_memop(vb, op, args...) \
81 ((vb)->vb2_queue->mem_ops->op ? (vb)->vb2_queue->mem_ops->op(args) : 0)
82#define fail_memop(vb, op)
83
84#define call_qop(q, op, args...) \
85 ((q)->ops->op ? (q)->ops->op(args) : 0)
86#define fail_qop(q, op)
87
88#define call_vb_qop(vb, op, args...) \
89 ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
90#define fail_vb_qop(vb, op)
91
92#endif
93
94/* Flags that are set by the vb2 core */
95#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
96 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
97 V4L2_BUF_FLAG_PREPARED | \
98 V4L2_BUF_FLAG_TIMESTAMP_MASK)
99/* Output buffer flags that should be passed on to the driver */
100#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
101 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
102
103/**
104 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
105 */
106static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
107{
108 struct vb2_queue *q = vb->vb2_queue;
109 void *mem_priv;
110 int plane;
111
112 /*
113 * Allocate memory for all planes in this buffer
114 * NOTE: mmapped areas should be page aligned
115 */
116 for (plane = 0; plane < vb->num_planes; ++plane) {
117 unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
118
119 mem_priv = call_memop(vb, alloc, q->alloc_ctx[plane],
120 size, q->gfp_flags);
121 if (IS_ERR_OR_NULL(mem_priv))
122 goto free;
123
124 /* Associate allocator private data with this plane */
125 vb->planes[plane].mem_priv = mem_priv;
126 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
127 }
128
129 return 0;
130free:
131 fail_memop(vb, alloc);
132 /* Free already allocated memory if one of the allocations failed */
133 for (; plane > 0; --plane) {
134 call_memop(vb, put, vb->planes[plane - 1].mem_priv);
135 vb->planes[plane - 1].mem_priv = NULL;
136 }
137
138 return -ENOMEM;
139}
140
141/**
142 * __vb2_buf_mem_free() - free memory of the given buffer
143 */
144static void __vb2_buf_mem_free(struct vb2_buffer *vb)
145{
146 unsigned int plane;
147
148 for (plane = 0; plane < vb->num_planes; ++plane) {
149 call_memop(vb, put, vb->planes[plane].mem_priv);
150 vb->planes[plane].mem_priv = NULL;
151 dprintk(3, "Freed plane %d of buffer %d\n", plane,
152 vb->v4l2_buf.index);
153 }
154}
155
156/**
157 * __vb2_buf_userptr_put() - release userspace memory associated with
158 * a USERPTR buffer
159 */
160static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
161{
162 unsigned int plane;
163
164 for (plane = 0; plane < vb->num_planes; ++plane) {
165 if (vb->planes[plane].mem_priv)
166 call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
167 vb->planes[plane].mem_priv = NULL;
168 }
169}
170
171/**
172 * __vb2_plane_dmabuf_put() - release memory associated with
173 * a DMABUF shared plane
174 */
175static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
176{
177 if (!p->mem_priv)
178 return;
179
180 if (p->dbuf_mapped)
181 call_memop(vb, unmap_dmabuf, p->mem_priv);
182
183 call_memop(vb, detach_dmabuf, p->mem_priv);
184 dma_buf_put(p->dbuf);
185 memset(p, 0, sizeof(*p));
186}
187
188/**
189 * __vb2_buf_dmabuf_put() - release memory associated with
190 * a DMABUF shared buffer
191 */
192static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
193{
194 unsigned int plane;
195
196 for (plane = 0; plane < vb->num_planes; ++plane)
197 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
198}
199
200/**
201 * __setup_lengths() - setup initial lengths for every plane in
202 * every buffer on the queue
203 */
204static void __setup_lengths(struct vb2_queue *q, unsigned int n)
205{
206 unsigned int buffer, plane;
207 struct vb2_buffer *vb;
208
209 for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
210 vb = q->bufs[buffer];
211 if (!vb)
212 continue;
213
214 for (plane = 0; plane < vb->num_planes; ++plane)
215 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
216 }
217}
218
219/**
220 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
221 * every buffer on the queue
222 */
223static void __setup_offsets(struct vb2_queue *q, unsigned int n)
224{
225 unsigned int buffer, plane;
226 struct vb2_buffer *vb;
227 unsigned long off;
228
229 if (q->num_buffers) {
230 struct v4l2_plane *p;
231 vb = q->bufs[q->num_buffers - 1];
232 p = &vb->v4l2_planes[vb->num_planes - 1];
233 off = PAGE_ALIGN(p->m.mem_offset + p->length);
234 } else {
235 off = 0;
236 }
237
238 for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
239 vb = q->bufs[buffer];
240 if (!vb)
241 continue;
242
243 for (plane = 0; plane < vb->num_planes; ++plane) {
244 vb->v4l2_planes[plane].m.mem_offset = off;
245
246 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
247 buffer, plane, off);
248
249 off += vb->v4l2_planes[plane].length;
250 off = PAGE_ALIGN(off);
251 }
252 }
253}
254
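/*
 * Worked example for __setup_offsets() (hypothetical sizes, assuming
 * PAGE_SIZE == 4096): starting from an empty queue, allocating two buffers
 * with two planes of 0x1800 and 0x500 bytes yields these mem_offset cookies:
 *
 *   buffer 0, plane 0: 0x0000   (off += 0x1800, PAGE_ALIGN -> 0x2000)
 *   buffer 0, plane 1: 0x2000   (off += 0x0500, PAGE_ALIGN -> 0x3000)
 *   buffer 1, plane 0: 0x3000   (off += 0x1800, PAGE_ALIGN -> 0x5000)
 *   buffer 1, plane 1: 0x5000
 *
 * Every plane therefore gets a unique, page-aligned offset that userspace
 * can later pass to mmap(); a subsequent CREATE_BUFS call continues from
 * PAGE_ALIGN(last offset + last length).
 */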
255/**
256 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
257 * video buffer memory for all buffers/planes on the queue and initialize the
258 * queue
259 *
260 * Returns the number of buffers successfully allocated.
261 */
262static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
263 unsigned int num_buffers, unsigned int num_planes)
264{
265 unsigned int buffer;
266 struct vb2_buffer *vb;
267 int ret;
268
269 for (buffer = 0; buffer < num_buffers; ++buffer) {
270 /* Allocate videobuf buffer structures */
271 vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
272 if (!vb) {
273 dprintk(1, "Memory alloc for buffer struct failed\n");
274 break;
275 }
276
277 /* Length stores number of planes for multiplanar buffers */
278 if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
279 vb->v4l2_buf.length = num_planes;
280
281 vb->state = VB2_BUF_STATE_DEQUEUED;
282 vb->vb2_queue = q;
283 vb->num_planes = num_planes;
284 vb->v4l2_buf.index = q->num_buffers + buffer;
285 vb->v4l2_buf.type = q->type;
286 vb->v4l2_buf.memory = memory;
287
288 /* Allocate video buffer memory for the MMAP type */
289 if (memory == V4L2_MEMORY_MMAP) {
290 ret = __vb2_buf_mem_alloc(vb);
291 if (ret) {
292 dprintk(1, "Failed allocating memory for "
293 "buffer %d\n", buffer);
294 kfree(vb);
295 break;
296 }
297 /*
298 * Call the driver-provided buffer initialization
299 * callback, if given. An error in initialization
300 * results in queue setup failure.
301 */
302 ret = call_vb_qop(vb, buf_init, vb);
303 if (ret) {
304 dprintk(1, "Buffer %d %p initialization"
305 " failed\n", buffer, vb);
306 fail_vb_qop(vb, buf_init);
307 __vb2_buf_mem_free(vb);
308 kfree(vb);
309 break;
310 }
311 }
312
313 q->bufs[q->num_buffers + buffer] = vb;
314 }
315
316 __setup_lengths(q, buffer);
317 if (memory == V4L2_MEMORY_MMAP)
318 __setup_offsets(q, buffer);
319
320 dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
321 buffer, num_planes);
322
323 return buffer;
324}
325
326/**
327 * __vb2_free_mem() - release all video buffer memory for a given queue
328 */
329static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
330{
331 unsigned int buffer;
332 struct vb2_buffer *vb;
333
334 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
335 ++buffer) {
336 vb = q->bufs[buffer];
337 if (!vb)
338 continue;
339
340 /* Free MMAP buffers or release USERPTR buffers */
341 if (q->memory == V4L2_MEMORY_MMAP)
342 __vb2_buf_mem_free(vb);
343 else if (q->memory == V4L2_MEMORY_DMABUF)
344 __vb2_buf_dmabuf_put(vb);
345 else
346 __vb2_buf_userptr_put(vb);
347 }
348}
349
350/**
351 * __vb2_queue_free() - free buffers at the end of the queue: release their video
352 * memory and related information; if no buffers are left, return the queue to an
353 * uninitialized state. Might be called even if the queue has already been freed.
354 */
355static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
356{
357 unsigned int buffer;
358
359 /*
360 * Sanity check: when preparing a buffer the queue lock is released for
361 * a short while (see __buf_prepare for the details), which would allow
362 * a race with a reqbufs which can call this function. Removing the
363 * buffers from underneath __buf_prepare is obviously a bad idea, so we
364 * check if any of the buffers is in the state PREPARING, and if so we
365 * just return -EAGAIN.
366 */
367 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
368 ++buffer) {
369 if (q->bufs[buffer] == NULL)
370 continue;
371 if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
372 dprintk(1, "reqbufs: preparing buffers, cannot free\n");
373 return -EAGAIN;
374 }
375 }
376
377 /* Call driver-provided cleanup function for each buffer, if provided */
378 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
379 ++buffer) {
380 struct vb2_buffer *vb = q->bufs[buffer];
381
382 if (vb && vb->planes[0].mem_priv)
383 call_vb_qop(vb, buf_cleanup, vb);
384 }
385
386 /* Release video buffer memory */
387 __vb2_free_mem(q, buffers);
388
389#ifdef CONFIG_VIDEO_ADV_DEBUG
390 /*
391 * Check that all the calls were balanced during the lifetime of this
392 * queue. If not (or if the debug level is 1 or up), then dump the
393 * counters to the kernel log.
394 */
395 if (q->num_buffers) {
396 bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
397 q->cnt_wait_prepare != q->cnt_wait_finish;
398
399 if (unbalanced || debug) {
400 pr_info("vb2: counters for queue %p:%s\n", q,
401 unbalanced ? " UNBALANCED!" : "");
402 pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
403 q->cnt_queue_setup, q->cnt_start_streaming,
404 q->cnt_stop_streaming);
405 pr_info("vb2: wait_prepare: %u wait_finish: %u\n",
406 q->cnt_wait_prepare, q->cnt_wait_finish);
407 }
408 q->cnt_queue_setup = 0;
409 q->cnt_wait_prepare = 0;
410 q->cnt_wait_finish = 0;
411 q->cnt_start_streaming = 0;
412 q->cnt_stop_streaming = 0;
413 }
414 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
415 struct vb2_buffer *vb = q->bufs[buffer];
416 bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
417 vb->cnt_mem_prepare != vb->cnt_mem_finish ||
418 vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
419 vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
420 vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
421 vb->cnt_buf_queue != vb->cnt_buf_done ||
422 vb->cnt_buf_prepare != vb->cnt_buf_finish ||
423 vb->cnt_buf_init != vb->cnt_buf_cleanup;
424
425 if (unbalanced || debug) {
426 pr_info("vb2: counters for queue %p, buffer %d:%s\n",
427 q, buffer, unbalanced ? " UNBALANCED!" : "");
428 pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
429 vb->cnt_buf_init, vb->cnt_buf_cleanup,
430 vb->cnt_buf_prepare, vb->cnt_buf_finish);
431 pr_info("vb2: buf_queue: %u buf_done: %u\n",
432 vb->cnt_buf_queue, vb->cnt_buf_done);
433 pr_info("vb2: alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
434 vb->cnt_mem_alloc, vb->cnt_mem_put,
435 vb->cnt_mem_prepare, vb->cnt_mem_finish,
436 vb->cnt_mem_mmap);
437 pr_info("vb2: get_userptr: %u put_userptr: %u\n",
438 vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
439 pr_info("vb2: attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
440 vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
441 vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
442 pr_info("vb2: get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
443 vb->cnt_mem_get_dmabuf,
444 vb->cnt_mem_num_users,
445 vb->cnt_mem_vaddr,
446 vb->cnt_mem_cookie);
447 }
448 }
449#endif
450
451 /* Free videobuf buffers */
452 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
453 ++buffer) {
454 kfree(q->bufs[buffer]);
455 q->bufs[buffer] = NULL;
456 }
457
458 q->num_buffers -= buffers;
459 if (!q->num_buffers)
460 q->memory = 0;
461 INIT_LIST_HEAD(&q->queued_list);
462 return 0;
463}
464
465/**
466 * __verify_planes_array() - verify that the planes array passed in struct
467 * v4l2_buffer from userspace can be safely used
468 */
469static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
470{
471 if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
472 return 0;
473
474 /* Is memory for copying plane information present? */
475 if (NULL == b->m.planes) {
476 dprintk(1, "Multi-planar buffer passed but "
477 "planes array not provided\n");
478 return -EINVAL;
479 }
480
481 if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
482 dprintk(1, "Incorrect planes array length, "
483 "expected %d, got %d\n", vb->num_planes, b->length);
484 return -EINVAL;
485 }
486
487 return 0;
488}
489
490/**
491 * __verify_length() - Verify that the bytesused value for each plane fits in
492 * the plane length and that the data offset doesn't exceed the bytesused value.
493 */
494static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
495{
496 unsigned int length;
497 unsigned int plane;
498
499 if (!V4L2_TYPE_IS_OUTPUT(b->type))
500 return 0;
501
502 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
503 for (plane = 0; plane < vb->num_planes; ++plane) {
504 length = (b->memory == V4L2_MEMORY_USERPTR)
505 ? b->m.planes[plane].length
506 : vb->v4l2_planes[plane].length;
507
508 if (b->m.planes[plane].bytesused > length)
509 return -EINVAL;
510
511 if (b->m.planes[plane].data_offset > 0 &&
512 b->m.planes[plane].data_offset >=
513 b->m.planes[plane].bytesused)
514 return -EINVAL;
515 }
516 } else {
517 length = (b->memory == V4L2_MEMORY_USERPTR)
518 ? b->length : vb->v4l2_planes[0].length;
519
520 if (b->bytesused > length)
521 return -EINVAL;
522 }
523
524 return 0;
525}
526
527/**
528 * __buffer_in_use() - return true if the buffer is in use and
529 * the queue cannot be freed (by the means of REQBUFS(0)) call
530 */
531static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
532{
533 unsigned int plane;
534 for (plane = 0; plane < vb->num_planes; ++plane) {
535 void *mem_priv = vb->planes[plane].mem_priv;
536 /*
537 * If num_users() has not been provided, call_memop
538 * will return 0, apparently nobody cares about this
539 * case anyway. If num_users() returns more than 1,
540 * we are not the only user of the plane's memory.
541 */
542 if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
543 return true;
544 }
545 return false;
546}
547
548/**
549 * __buffers_in_use() - return true if any buffers on the queue are in use and
550 * the queue cannot be freed (by the means of REQBUFS(0)) call
551 */
552static bool __buffers_in_use(struct vb2_queue *q)
553{
554 unsigned int buffer;
555 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
556 if (__buffer_in_use(q, q->bufs[buffer]))
557 return true;
558 }
559 return false;
560}
561
562/**
563 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
564 * returned to userspace
565 */
566static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
567{
568 struct vb2_queue *q = vb->vb2_queue;
569
570 /* Copy back data such as timestamp, flags, etc. */
571 memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
572 b->reserved2 = vb->v4l2_buf.reserved2;
573 b->reserved = vb->v4l2_buf.reserved;
574
575 if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
576 /*
577 * Fill in plane-related data if userspace provided an array
578 * for it. The caller has already verified memory and size.
579 */
580 b->length = vb->num_planes;
581 memcpy(b->m.planes, vb->v4l2_planes,
582 b->length * sizeof(struct v4l2_plane));
583 } else {
584 /*
585 * We use length and offset in v4l2_planes array even for
586 * single-planar buffers, but userspace does not.
587 */
588 b->length = vb->v4l2_planes[0].length;
589 b->bytesused = vb->v4l2_planes[0].bytesused;
590 if (q->memory == V4L2_MEMORY_MMAP)
591 b->m.offset = vb->v4l2_planes[0].m.mem_offset;
592 else if (q->memory == V4L2_MEMORY_USERPTR)
593 b->m.userptr = vb->v4l2_planes[0].m.userptr;
594 else if (q->memory == V4L2_MEMORY_DMABUF)
595 b->m.fd = vb->v4l2_planes[0].m.fd;
596 }
597
598 /*
599 * Clear any buffer state related flags.
600 */
601 b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
602 b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
603 if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
604 V4L2_BUF_FLAG_TIMESTAMP_COPY) {
605 /*
606 * For non-COPY timestamps, drop timestamp source bits
607 * and obtain the timestamp source from the queue.
608 */
609 b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
610 b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
611 }
612
613 switch (vb->state) {
614 case VB2_BUF_STATE_QUEUED:
615 case VB2_BUF_STATE_ACTIVE:
616 b->flags |= V4L2_BUF_FLAG_QUEUED;
617 break;
618 case VB2_BUF_STATE_ERROR:
619 b->flags |= V4L2_BUF_FLAG_ERROR;
620 /* fall through */
621 case VB2_BUF_STATE_DONE:
622 b->flags |= V4L2_BUF_FLAG_DONE;
623 break;
624 case VB2_BUF_STATE_PREPARED:
625 b->flags |= V4L2_BUF_FLAG_PREPARED;
626 break;
627 case VB2_BUF_STATE_PREPARING:
628 case VB2_BUF_STATE_DEQUEUED:
629 /* nothing */
630 break;
631 }
632
633 if (__buffer_in_use(q, vb))
634 b->flags |= V4L2_BUF_FLAG_MAPPED;
635}
636
637/**
638 * vb2_querybuf() - query video buffer information
639 * @q: videobuf queue
640 * @b: buffer struct passed from userspace to vidioc_querybuf handler
641 * in driver
642 *
643 * Should be called from vidioc_querybuf ioctl handler in driver.
644 * This function will verify the passed v4l2_buffer structure and fill the
645 * relevant information for the userspace.
646 *
647 * The return values from this function are intended to be directly returned
648 * from vidioc_querybuf handler in driver.
649 */
650int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
651{
652 struct vb2_buffer *vb;
653 int ret;
654
655 if (b->type != q->type) {
656 dprintk(1, "querybuf: wrong buffer type\n");
657 return -EINVAL;
658 }
659
660 if (b->index >= q->num_buffers) {
661 dprintk(1, "querybuf: buffer index out of range\n");
662 return -EINVAL;
663 }
664 vb = q->bufs[b->index];
665 ret = __verify_planes_array(vb, b);
666 if (!ret)
667 __fill_v4l2_buffer(vb, b);
668 return ret;
669}
670EXPORT_SYMBOL(vb2_querybuf);
671
672/**
673 * __verify_userptr_ops() - verify that all memory operations required for
674 * USERPTR queue type have been provided
675 */
676static int __verify_userptr_ops(struct vb2_queue *q)
677{
678 if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
679 !q->mem_ops->put_userptr)
680 return -EINVAL;
681
682 return 0;
683}
684
685/**
686 * __verify_mmap_ops() - verify that all memory operations required for
687 * MMAP queue type have been provided
688 */
689static int __verify_mmap_ops(struct vb2_queue *q)
690{
691 if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
692 !q->mem_ops->put || !q->mem_ops->mmap)
693 return -EINVAL;
694
695 return 0;
696}
697
698/**
699 * __verify_dmabuf_ops() - verify that all memory operations required for
700 * DMABUF queue type have been provided
701 */
702static int __verify_dmabuf_ops(struct vb2_queue *q)
703{
704 if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
705 !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
706 !q->mem_ops->unmap_dmabuf)
707 return -EINVAL;
708
709 return 0;
710}
711
712/**
713 * __verify_memory_type() - Check whether the memory type and buffer type
714 * passed to a buffer operation are compatible with the queue.
715 */
716static int __verify_memory_type(struct vb2_queue *q,
717 enum v4l2_memory memory, enum v4l2_buf_type type)
718{
719 if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
720 memory != V4L2_MEMORY_DMABUF) {
721 dprintk(1, "reqbufs: unsupported memory type\n");
722 return -EINVAL;
723 }
724
725 if (type != q->type) {
726 dprintk(1, "reqbufs: requested type is incorrect\n");
727 return -EINVAL;
728 }
729
730 /*
731 * Make sure all the required memory ops for given memory type
732 * are available.
733 */
734 if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
735 dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
736 return -EINVAL;
737 }
738
739 if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
740 dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
741 return -EINVAL;
742 }
743
744 if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
745 dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
746 return -EINVAL;
747 }
748
749 /*
750 * Place the busy tests at the end: -EBUSY can be ignored when
751 * create_bufs is called with count == 0, but count == 0 should still
752 * do the memory and type validation.
753 */
754 if (q->fileio) {
755 dprintk(1, "reqbufs: file io in progress\n");
756 return -EBUSY;
757 }
758 return 0;
759}
760
761/**
762 * __reqbufs() - Initiate buffer allocation for the queue
763 * @q: videobuf2 queue
764 * @req: struct passed from userspace to vidioc_reqbufs handler in driver
765 *
766 * Should be called from vidioc_reqbufs ioctl handler of a driver.
767 * This function:
768 * 1) verifies streaming parameters passed from the userspace,
769 * 2) sets up the queue,
770 * 3) negotiates number of buffers and planes per buffer with the driver
771 * to be used during streaming,
772 * 4) allocates internal buffer structures (struct vb2_buffer), according to
773 * the agreed parameters,
774 * 5) for MMAP memory type, allocates actual video memory, using the
775 * memory handling/allocation routines provided during queue initialization
776 *
777 * If req->count is 0, all the memory will be freed instead.
778 * If the queue has been allocated previously (by a previous vb2_reqbufs) call
779 * and the queue is not busy, memory will be reallocated.
780 *
781 * The return values from this function are intended to be directly returned
782 * from vidioc_reqbufs handler in driver.
783 */
784static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
785{
786 unsigned int num_buffers, allocated_buffers, num_planes = 0;
787 int ret;
788
789 if (q->streaming) {
790 dprintk(1, "reqbufs: streaming active\n");
791 return -EBUSY;
792 }
793
794 if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
795 /*
796 * We already have buffers allocated, so first check if they
797 * are not in use and can be freed.
798 */
799 if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
800 dprintk(1, "reqbufs: memory in use, cannot free\n");
801 return -EBUSY;
802 }
803
804 ret = __vb2_queue_free(q, q->num_buffers);
805 if (ret)
806 return ret;
807
808 /*
809 * In case of REQBUFS(0) return immediately without calling
810 * driver's queue_setup() callback and allocating resources.
811 */
812 if (req->count == 0)
813 return 0;
814 }
815
816 /*
817 * Make sure the requested values and current defaults are sane.
818 */
819 num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
820 memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
821 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
822 q->memory = req->memory;
823
824 /*
825 * Ask the driver how many buffers and planes per buffer it requires.
826 * Driver also sets the size and allocator context for each plane.
827 */
828 ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
829 q->plane_sizes, q->alloc_ctx);
830 if (ret) {
831 fail_qop(q, queue_setup);
832 return ret;
833 }
834
835 /* Finally, allocate buffers and video memory */
836 ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
837 if (ret == 0) {
838 dprintk(1, "Memory allocation failed\n");
839 return -ENOMEM;
840 }
841
842 allocated_buffers = ret;
843
844 /*
845 * Check if driver can handle the allocated number of buffers.
846 */
847 if (allocated_buffers < num_buffers) {
848 num_buffers = allocated_buffers;
849
850 ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
851 &num_planes, q->plane_sizes, q->alloc_ctx);
852 if (ret)
853 fail_qop(q, queue_setup);
854
855 if (!ret && allocated_buffers < num_buffers)
856 ret = -ENOMEM;
857
858 /*
859 * Either the driver has accepted a smaller number of buffers,
860 * or .queue_setup() returned an error
861 */
862 }
863
864 q->num_buffers = allocated_buffers;
865
866 if (ret < 0) {
867 __vb2_queue_free(q, allocated_buffers);
868 return ret;
869 }
870
871 /*
872 * Return the number of successfully allocated buffers
873 * to the userspace.
874 */
875 req->count = allocated_buffers;
876
877 return 0;
878}
879
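/*
 * Illustrative sketch (not part of this file): a minimal queue_setup
 * implementation of the kind __reqbufs()/__create_bufs() above negotiate
 * with. The callback signature matches the call_qop() invocations in this
 * version of the core; "struct my_dev", "MY_MIN_BUFFERS" and the size
 * calculation are hypothetical driver details.
 */
static int my_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
			  unsigned int *num_buffers, unsigned int *num_planes,
			  unsigned int sizes[], void *alloc_ctxs[])
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	if (*num_buffers < MY_MIN_BUFFERS)		/* enforce a sane minimum */
		*num_buffers = MY_MIN_BUFFERS;

	*num_planes = 1;				/* single-planar format */
	sizes[0] = dev->bytesperline * dev->height;	/* one full image per plane */
	alloc_ctxs[0] = dev->alloc_ctx;

	return 0;
}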
880/**
881 * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
882 * type values.
883 * @q: videobuf2 queue
884 * @req: struct passed from userspace to vidioc_reqbufs handler in driver
885 */
886int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
887{
888 int ret = __verify_memory_type(q, req->memory, req->type);
889
890 return ret ? ret : __reqbufs(q, req);
891}
892EXPORT_SYMBOL_GPL(vb2_reqbufs);
893
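/*
 * Illustrative sketch: a driver's vidioc_reqbufs handler usually just
 * forwards to vb2_reqbufs() and returns its result directly, as the comment
 * above suggests; the same pattern applies to querybuf, qbuf and dqbuf.
 * "struct my_dev" and file2dev() are hypothetical helpers.
 */
static int my_vidioc_reqbufs(struct file *file, void *priv,
			     struct v4l2_requestbuffers *req)
{
	struct my_dev *dev = file2dev(file);

	return vb2_reqbufs(&dev->queue, req);
}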
894/**
895 * __create_bufs() - Allocate buffers and any required auxiliary structs
896 * @q: videobuf2 queue
897 * @create: creation parameters, passed from userspace to vidioc_create_bufs
898 * handler in driver
899 *
900 * Should be called from vidioc_create_bufs ioctl handler of a driver.
901 * This function:
902 * 1) verifies parameter sanity
903 * 2) calls the .queue_setup() queue operation
904 * 3) performs any necessary memory allocations
905 *
906 * The return values from this function are intended to be directly returned
907 * from vidioc_create_bufs handler in driver.
908 */
909static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
910{
911 unsigned int num_planes = 0, num_buffers, allocated_buffers;
912 int ret;
913
914 if (q->num_buffers == VIDEO_MAX_FRAME) {
915 dprintk(1, "%s(): maximum number of buffers already allocated\n",
916 __func__);
917 return -ENOBUFS;
918 }
919
920 if (!q->num_buffers) {
921 memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
922 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
923 q->memory = create->memory;
924 }
925
926 num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
927
928 /*
929 * Ask the driver whether the requested number of buffers, planes per
930 * buffer and their sizes are acceptable
931 */
932 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
933 &num_planes, q->plane_sizes, q->alloc_ctx);
934 if (ret) {
935 fail_qop(q, queue_setup);
936 return ret;
937 }
938
939 /* Finally, allocate buffers and video memory */
940 ret = __vb2_queue_alloc(q, create->memory, num_buffers,
941 num_planes);
942 if (ret == 0) {
943 dprintk(1, "Memory allocation failed\n");
944 return -ENOMEM;
945 }
946
947 allocated_buffers = ret;
948
949 /*
950 * Check if driver can handle the so far allocated number of buffers.
951 */
952 if (ret < num_buffers) {
953 num_buffers = ret;
954
955 /*
956 * q->num_buffers contains the total number of buffers that the
957 * queue driver has set up
958 */
959 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
960 &num_planes, q->plane_sizes, q->alloc_ctx);
961 if (ret)
962 fail_qop(q, queue_setup);
963
964 if (!ret && allocated_buffers < num_buffers)
965 ret = -ENOMEM;
966
967 /*
968 * Either the driver has accepted a smaller number of buffers,
969 * or .queue_setup() returned an error
970 */
971 }
972
973 q->num_buffers += allocated_buffers;
974
975 if (ret < 0) {
976 __vb2_queue_free(q, allocated_buffers);
977 return -ENOMEM;
978 }
979
980 /*
981 * Return the number of successfully allocated buffers
982 * to the userspace.
983 */
984 create->count = allocated_buffers;
985
986 return 0;
987}
988
989/**
990 * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
991 * memory and type values.
992 * @q: videobuf2 queue
993 * @create: creation parameters, passed from userspace to vidioc_create_bufs
994 * handler in driver
995 */
996int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
997{
998 int ret = __verify_memory_type(q, create->memory, create->format.type);
999
1000 create->index = q->num_buffers;
1001 if (create->count == 0)
1002 return ret != -EBUSY ? ret : 0;
1003 return ret ? ret : __create_bufs(q, create);
1004}
1005EXPORT_SYMBOL_GPL(vb2_create_bufs);
1006
1007/**
1008 * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
1009 * @vb: vb2_buffer to which the plane in question belongs to
1010 * @plane_no: plane number for which the address is to be returned
1011 *
1012 * This function returns a kernel virtual address of a given plane if
1013 * such a mapping exists, NULL otherwise.
1014 */
1015void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
1016{
1017 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
1018 return NULL;
1019
1020 return call_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
1021
1022}
1023EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
1024
1025/**
1026 * vb2_plane_cookie() - Return allocator specific cookie for the given plane
1027 * @vb: vb2_buffer to which the plane in question belongs to
1028 * @plane_no: plane number for which the cookie is to be returned
1029 *
1030 * This function returns an allocator specific cookie for a given plane if
1031 * available, NULL otherwise. The allocator should provide some simple static
1032 * inline function, which would convert this cookie to the allocator specific
1033 * type that can be used directly by the driver to access the buffer. This can
1034 * be for example physical address, pointer to scatter list or IOMMU mapping.
1035 */
1036void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
1037{
1038 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
1039 return NULL;
1040
1041 return call_memop(vb, cookie, vb->planes[plane_no].mem_priv);
1042}
1043EXPORT_SYMBOL_GPL(vb2_plane_cookie);
1044
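/*
 * Illustrative sketch: how a driver might consume the two accessors above.
 * vb2_plane_vaddr() is meaningful for allocators that provide a kernel
 * mapping (e.g. vmalloc); with the dma-contig allocator the cookie is a
 * dma_addr_t, normally read through that allocator's helper
 * vb2_dma_contig_plane_dma_addr() (videobuf2-dma-contig.h). Only one of the
 * two applies to any given queue; my_hw_program_dma() is hypothetical.
 */
static void my_process_buffer(struct my_dev *dev, struct vb2_buffer *vb)
{
	void *vaddr = vb2_plane_vaddr(vb, 0);	/* NULL if no kernel mapping */

	if (vaddr)
		memset(vaddr, 0, vb2_plane_size(vb, 0));
	else
		my_hw_program_dma(dev, vb2_dma_contig_plane_dma_addr(vb, 0));
}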
1045/**
1046 * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
1047 * @vb: vb2_buffer returned from the driver
1048 * @state: either VB2_BUF_STATE_DONE if the operation finished successfully
1049 * or VB2_BUF_STATE_ERROR if the operation finished with an error
1050 *
1051 * This function should be called by the driver after a hardware operation on
1052 * a buffer is finished and the buffer may be returned to userspace. The driver
1053 * cannot use this buffer anymore until it is queued back to it by videobuf
1054 * by the means of buf_queue callback. Only buffers previously queued to the
1055 * driver by buf_queue can be passed to this function.
1056 */
1057void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
1058{
1059 struct vb2_queue *q = vb->vb2_queue;
1060 unsigned long flags;
1061 unsigned int plane;
1062
1063 if (vb->state != VB2_BUF_STATE_ACTIVE)
1064 return;
1065
1066 if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
1067 return;
1068
1069#ifdef CONFIG_VIDEO_ADV_DEBUG
1070 /*
1071 * Although this is not a callback, it still does have to balance
1072 * with the buf_queue op. So update this counter manually.
1073 */
1074 vb->cnt_buf_done++;
1075#endif
1076 dprintk(4, "Done processing on buffer %d, state: %d\n",
1077 vb->v4l2_buf.index, state);
1078
1079 /* sync buffers */
1080 for (plane = 0; plane < vb->num_planes; ++plane)
1081 call_memop(vb, finish, vb->planes[plane].mem_priv);
1082
1083 /* Add the buffer to the done buffers list */
1084 spin_lock_irqsave(&q->done_lock, flags);
1085 vb->state = state;
1086 list_add_tail(&vb->done_entry, &q->done_list);
1087 atomic_dec(&q->owned_by_drv_count);
1088 spin_unlock_irqrestore(&q->done_lock, flags);
1089
1090 /* Inform any processes that may be waiting for buffers */
1091 wake_up(&q->done_wq);
1092}
1093EXPORT_SYMBOL_GPL(vb2_buffer_done);
1094
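/*
 * Illustrative sketch: a typical capture interrupt handler returns the
 * finished buffer with vb2_buffer_done(), which is what decrements
 * owned_by_drv_count above. "struct my_dev" and its fields are hypothetical
 * driver state.
 */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;
	struct vb2_buffer *vb = dev->current_buf;

	if (!vb)
		return IRQ_NONE;

	vb->v4l2_buf.sequence = dev->sequence++;
	v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
	vb2_buffer_done(vb, dev->dma_error ? VB2_BUF_STATE_ERROR
					   : VB2_BUF_STATE_DONE);
	dev->current_buf = NULL;
	return IRQ_HANDLED;
}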
1095/**
1096 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
1097 * v4l2_buffer by the userspace. The caller has already verified that struct
1098 * v4l2_buffer has a valid number of planes.
1099 */
1100static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
1101 struct v4l2_plane *v4l2_planes)
1102{
1103 unsigned int plane;
1104
1105 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
1106 /* Fill in driver-provided information for OUTPUT types */
1107 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
1108 /*
1109 * Will have to go up to b->length when API starts
1110 * accepting variable number of planes.
1111 */
1112 for (plane = 0; plane < vb->num_planes; ++plane) {
1113 v4l2_planes[plane].bytesused =
1114 b->m.planes[plane].bytesused;
1115 v4l2_planes[plane].data_offset =
1116 b->m.planes[plane].data_offset;
1117 }
1118 }
1119
1120 if (b->memory == V4L2_MEMORY_USERPTR) {
1121 for (plane = 0; plane < vb->num_planes; ++plane) {
1122 v4l2_planes[plane].m.userptr =
1123 b->m.planes[plane].m.userptr;
1124 v4l2_planes[plane].length =
1125 b->m.planes[plane].length;
1126 }
1127 }
1128 if (b->memory == V4L2_MEMORY_DMABUF) {
1129 for (plane = 0; plane < vb->num_planes; ++plane) {
1130 v4l2_planes[plane].m.fd =
1131 b->m.planes[plane].m.fd;
1132 v4l2_planes[plane].length =
1133 b->m.planes[plane].length;
1134 v4l2_planes[plane].data_offset =
1135 b->m.planes[plane].data_offset;
1136 }
1137 }
1138 } else {
1139 /*
1140 * Single-planar buffers do not use planes array,
1141 * so fill in relevant v4l2_buffer struct fields instead.
1142 * In videobuf we use our internal v4l2_planes struct for
1143 * single-planar buffers as well, for simplicity.
1144 */
1145 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
1146 v4l2_planes[0].bytesused = b->bytesused;
1147 v4l2_planes[0].data_offset = 0;
1148 }
1149
1150 if (b->memory == V4L2_MEMORY_USERPTR) {
1151 v4l2_planes[0].m.userptr = b->m.userptr;
1152 v4l2_planes[0].length = b->length;
1153 }
1154
1155 if (b->memory == V4L2_MEMORY_DMABUF) {
1156 v4l2_planes[0].m.fd = b->m.fd;
1157 v4l2_planes[0].length = b->length;
1158 v4l2_planes[0].data_offset = 0;
1159 }
1160
1161 }
1162
1163 /* Zero flags that the vb2 core handles */
1164 vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
1165 if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
1166 V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
1167 /*
1168 * Non-COPY timestamps and non-OUTPUT queues will get
1169 * their timestamp and timestamp source flags from the
1170 * queue.
1171 */
1172 vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1173 }
1174
1175 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
1176 /*
1177 * For output buffers mask out the timecode flag:
1178 * this will be handled later in vb2_internal_qbuf().
1179 * The 'field' is valid metadata for this output buffer
1180 * and so that needs to be copied here.
1181 */
1182 vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
1183 vb->v4l2_buf.field = b->field;
1184 } else {
1185 /* Zero any output buffer flags as this is a capture buffer */
1186 vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
1187 }
1188}
1189
1190/**
1191 * __qbuf_userptr() - handle qbuf of a USERPTR buffer
1192 */
1193static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1194{
1195 struct v4l2_plane planes[VIDEO_MAX_PLANES];
1196 struct vb2_queue *q = vb->vb2_queue;
1197 void *mem_priv;
1198 unsigned int plane;
1199 int ret;
1200 int write = !V4L2_TYPE_IS_OUTPUT(q->type);
1201 bool reacquired = vb->planes[0].mem_priv == NULL;
1202
1203 /* Copy relevant information provided by the userspace */
1204 __fill_vb2_buffer(vb, b, planes);
1205
1206 for (plane = 0; plane < vb->num_planes; ++plane) {
1207 /* Skip the plane if already verified */
1208 if (vb->v4l2_planes[plane].m.userptr &&
1209 vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
1210 && vb->v4l2_planes[plane].length == planes[plane].length)
1211 continue;
1212
1213 dprintk(3, "qbuf: userspace address for plane %d changed, "
1214 "reacquiring memory\n", plane);
1215
1216 /* Check if the provided plane buffer is large enough */
1217 if (planes[plane].length < q->plane_sizes[plane]) {
1218 dprintk(1, "qbuf: provided buffer size %u is less than "
1219 "setup size %u for plane %d\n",
1220 planes[plane].length,
1221 q->plane_sizes[plane], plane);
1222 ret = -EINVAL;
1223 goto err;
1224 }
1225
1226 /* Release previously acquired memory if present */
1227 if (vb->planes[plane].mem_priv) {
1228 if (!reacquired) {
1229 reacquired = true;
1230 call_vb_qop(vb, buf_cleanup, vb);
1231 }
1232 call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
1233 }
1234
1235 vb->planes[plane].mem_priv = NULL;
1236 memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
1237
1238 /* Acquire each plane's memory */
1239 mem_priv = call_memop(vb, get_userptr, q->alloc_ctx[plane],
1240 planes[plane].m.userptr,
1241 planes[plane].length, write);
1242 if (IS_ERR_OR_NULL(mem_priv)) {
1243 dprintk(1, "qbuf: failed acquiring userspace "
1244 "memory for plane %d\n", plane);
1245 fail_memop(vb, get_userptr);
1246 ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
1247 goto err;
1248 }
1249 vb->planes[plane].mem_priv = mem_priv;
1250 }
1251
1252 /*
1253 * Now that everything is in order, copy relevant information
1254 * provided by userspace.
1255 */
1256 for (plane = 0; plane < vb->num_planes; ++plane)
1257 vb->v4l2_planes[plane] = planes[plane];
1258
1259 if (reacquired) {
1260 /*
1261 * One or more planes changed, so we must call buf_init to do
1262 * the driver-specific initialization on the newly acquired
1263 * buffer, if provided.
1264 */
1265 ret = call_vb_qop(vb, buf_init, vb);
1266 if (ret) {
1267 dprintk(1, "qbuf: buffer initialization failed\n");
1268 fail_vb_qop(vb, buf_init);
1269 goto err;
1270 }
1271 }
1272
1273 ret = call_vb_qop(vb, buf_prepare, vb);
1274 if (ret) {
1275 dprintk(1, "qbuf: buffer preparation failed\n");
1276 fail_vb_qop(vb, buf_prepare);
1277 call_vb_qop(vb, buf_cleanup, vb);
1278 goto err;
1279 }
1280
1281 return 0;
1282err:
1283 /* In case of errors, release planes that were already acquired */
1284 for (plane = 0; plane < vb->num_planes; ++plane) {
1285 if (vb->planes[plane].mem_priv)
1286 call_memop(vb, put_userptr, vb->planes[plane].mem_priv);
1287 vb->planes[plane].mem_priv = NULL;
1288 vb->v4l2_planes[plane].m.userptr = 0;
1289 vb->v4l2_planes[plane].length = 0;
1290 }
1291
1292 return ret;
1293}
1294
1295/**
1296 * __qbuf_mmap() - handle qbuf of an MMAP buffer
1297 */
1298static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1299{
1300 int ret;
1301
1302 __fill_vb2_buffer(vb, b, vb->v4l2_planes);
1303 ret = call_vb_qop(vb, buf_prepare, vb);
1304 if (ret)
1305 fail_vb_qop(vb, buf_prepare);
1306 return ret;
1307}
1308
1309/**
1310 * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
1311 */
1312static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1313{
1314 struct v4l2_plane planes[VIDEO_MAX_PLANES];
1315 struct vb2_queue *q = vb->vb2_queue;
1316 void *mem_priv;
1317 unsigned int plane;
1318 int ret;
1319 int write = !V4L2_TYPE_IS_OUTPUT(q->type);
1320 bool reacquired = vb->planes[0].mem_priv == NULL;
1321
1322 /* Copy relevant information provided by the userspace */
1323 __fill_vb2_buffer(vb, b, planes);
1324
1325 for (plane = 0; plane < vb->num_planes; ++plane) {
1326 struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
1327
1328 if (IS_ERR_OR_NULL(dbuf)) {
1329 dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
1330 plane);
1331 ret = -EINVAL;
1332 goto err;
1333 }
1334
1335 /* use DMABUF size if length is not provided */
1336 if (planes[plane].length == 0)
1337 planes[plane].length = dbuf->size;
1338
1339 if (planes[plane].length < planes[plane].data_offset +
1340 q->plane_sizes[plane]) {
1341 dprintk(1, "qbuf: invalid dmabuf length for plane %d\n",
1342 plane);
1343 ret = -EINVAL;
1344 goto err;
1345 }
1346
1347 /* Skip the plane if already verified */
1348 if (dbuf == vb->planes[plane].dbuf &&
1349 vb->v4l2_planes[plane].length == planes[plane].length) {
1350 dma_buf_put(dbuf);
1351 continue;
1352 }
1353
1354 dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
1355
1356 if (!reacquired) {
1357 reacquired = true;
1358 call_vb_qop(vb, buf_cleanup, vb);
1359 }
1360
1361 /* Release previously acquired memory if present */
1362 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
1363 memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
1364
1365 /* Acquire each plane's memory */
1366 mem_priv = call_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
1367 dbuf, planes[plane].length, write);
1368 if (IS_ERR(mem_priv)) {
1369 dprintk(1, "qbuf: failed to attach dmabuf\n");
1370 fail_memop(vb, attach_dmabuf);
1371 ret = PTR_ERR(mem_priv);
1372 dma_buf_put(dbuf);
1373 goto err;
1374 }
1375
1376 vb->planes[plane].dbuf = dbuf;
1377 vb->planes[plane].mem_priv = mem_priv;
1378 }
1379
1380 /* TODO: This pins the buffer(s) with dma_buf_map_attachment().. but
1381 * really we want to do this just before the DMA, not while queueing
1382 * the buffer(s)..
1383 */
1384 for (plane = 0; plane < vb->num_planes; ++plane) {
1385 ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
1386 if (ret) {
1387 dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
1388 plane);
1389 fail_memop(vb, map_dmabuf);
1390 goto err;
1391 }
1392 vb->planes[plane].dbuf_mapped = 1;
1393 }
1394
1395 /*
1396 * Now that everything is in order, copy relevant information
1397 * provided by userspace.
1398 */
1399 for (plane = 0; plane < vb->num_planes; ++plane)
1400 vb->v4l2_planes[plane] = planes[plane];
1401
1402 if (reacquired) {
1403 /*
1404 * Call driver-specific initialization on the newly acquired buffer,
1405 * if provided.
1406 */
1407 ret = call_vb_qop(vb, buf_init, vb);
1408 if (ret) {
1409 dprintk(1, "qbuf: buffer initialization failed\n");
1410 fail_vb_qop(vb, buf_init);
1411 goto err;
1412 }
1413 }
1414
1415 ret = call_vb_qop(vb, buf_prepare, vb);
1416 if (ret) {
1417 dprintk(1, "qbuf: buffer preparation failed\n");
1418 fail_vb_qop(vb, buf_prepare);
1419 call_vb_qop(vb, buf_cleanup, vb);
1420 goto err;
1421 }
1422
1423 return 0;
1424err:
1425 /* In case of errors, release planes that were already acquired */
1426 __vb2_buf_dmabuf_put(vb);
1427
1428 return ret;
1429}
1430
1431/**
1432 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
1433 */
1434static void __enqueue_in_driver(struct vb2_buffer *vb)
1435{
1436 struct vb2_queue *q = vb->vb2_queue;
1437 unsigned int plane;
1438
1439 vb->state = VB2_BUF_STATE_ACTIVE;
1440 atomic_inc(&q->owned_by_drv_count);
1441
1442 /* sync buffers */
1443 for (plane = 0; plane < vb->num_planes; ++plane)
1444 call_memop(vb, prepare, vb->planes[plane].mem_priv);
1445
1446 call_vb_qop(vb, buf_queue, vb);
1447}
1448
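/*
 * Illustrative sketch: the buf_queue callback invoked by
 * __enqueue_in_driver() above typically just moves the buffer onto a
 * driver-owned list under a spinlock; programming the actual DMA happens
 * later (e.g. from the interrupt handler). All "my_*" names are
 * hypothetical.
 */
static void my_buf_queue(struct vb2_buffer *vb)
{
	struct my_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct my_buffer *buf = container_of(vb, struct my_buffer, vb);
	unsigned long flags;

	spin_lock_irqsave(&dev->slock, flags);
	list_add_tail(&buf->list, &dev->buf_list);
	spin_unlock_irqrestore(&dev->slock, flags);
}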
1449static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1450{
1451 struct vb2_queue *q = vb->vb2_queue;
1452 struct rw_semaphore *mmap_sem;
1453 int ret;
1454
1455 ret = __verify_length(vb, b);
1456 if (ret < 0) {
1457 dprintk(1, "%s(): plane parameters verification failed: %d\n",
1458 __func__, ret);
1459 return ret;
1460 }
1461
1462 vb->state = VB2_BUF_STATE_PREPARING;
1463 vb->v4l2_buf.timestamp.tv_sec = 0;
1464 vb->v4l2_buf.timestamp.tv_usec = 0;
1465 vb->v4l2_buf.sequence = 0;
1466
1467 switch (q->memory) {
1468 case V4L2_MEMORY_MMAP:
1469 ret = __qbuf_mmap(vb, b);
1470 break;
1471 case V4L2_MEMORY_USERPTR:
1472 /*
1473 * In case of user pointer buffers vb2 allocators need to get
1474 * direct access to userspace pages. This requires getting
1475 * the mmap semaphore for read access in the current process
1476 * structure. The same semaphore is taken before calling mmap
1477 * operation, while both qbuf/prepare_buf and mmap are called
1478 * by the driver or v4l2 core with the driver's lock held.
1479 * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
1480 * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
1481 * the videobuf2 core releases the driver's lock, takes
1482 * mmap_sem and then takes the driver's lock again.
1483 */
1484 mmap_sem = &current->mm->mmap_sem;
1485 call_qop(q, wait_prepare, q);
1486 down_read(mmap_sem);
1487 call_qop(q, wait_finish, q);
1488
1489 ret = __qbuf_userptr(vb, b);
1490
1491 up_read(mmap_sem);
1492 break;
1493 case V4L2_MEMORY_DMABUF:
1494 ret = __qbuf_dmabuf(vb, b);
1495 break;
1496 default:
1497 WARN(1, "Invalid queue type\n");
1498 ret = -EINVAL;
1499 }
1500
1501 if (ret)
1502 dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
1503 vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
1504
1505 return ret;
1506}
1507
1508static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
1509 const char *opname)
1510{
1511 if (b->type != q->type) {
1512 dprintk(1, "%s(): invalid buffer type\n", opname);
1513 return -EINVAL;
1514 }
1515
1516 if (b->index >= q->num_buffers) {
1517 dprintk(1, "%s(): buffer index out of range\n", opname);
1518 return -EINVAL;
1519 }
1520
1521 if (q->bufs[b->index] == NULL) {
1522 /* Should never happen */
1523 dprintk(1, "%s(): buffer is NULL\n", opname);
1524 return -EINVAL;
1525 }
1526
1527 if (b->memory != q->memory) {
1528 dprintk(1, "%s(): invalid memory type\n", opname);
1529 return -EINVAL;
1530 }
1531
1532 return __verify_planes_array(q->bufs[b->index], b);
1533}
1534
1535/**
1536 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
1537 * @q: videobuf2 queue
1538 * @b: buffer structure passed from userspace to vidioc_prepare_buf
1539 * handler in driver
1540 *
1541 * Should be called from vidioc_prepare_buf ioctl handler of a driver.
1542 * This function:
1543 * 1) verifies the passed buffer,
1544 * 2) calls buf_prepare callback in the driver (if provided), in which
1545 * driver-specific buffer initialization can be performed,
1546 *
1547 * The return values from this function are intended to be directly returned
012043b8 1548 * from vidioc_prepare_buf handler in driver.
e23ccc0a 1549 */
1550int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
1551{
1552 struct vb2_buffer *vb;
1553 int ret;
1554
1555 if (q->fileio) {
1556 dprintk(1, "%s(): file io in progress\n", __func__);
1557 return -EBUSY;
1558 }
1559
1560 ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
1561 if (ret)
1562 return ret;
1563
1564 vb = q->bufs[b->index];
1565 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1566 dprintk(1, "%s(): invalid buffer state %d\n", __func__,
1567 vb->state);
1568 return -EINVAL;
1569 }
1570
1571 ret = __buf_prepare(vb, b);
1572 if (!ret) {
1573 /* Fill buffer information for the userspace */
1574 __fill_v4l2_buffer(vb, b);
1575
1576 dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
1577 }
1578 return ret;
1579}
1580EXPORT_SYMBOL_GPL(vb2_prepare_buf);
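/*
 * Illustrative sketch (userspace side, hypothetical fd and frame pointer):
 * an application can run the slow memory-pinning step ahead of time with
 * VIDIOC_PREPARE_BUF and later queue the already-PREPARED buffer with
 * VIDIOC_QBUF on the latency-critical path.
 *
 *	struct v4l2_buffer buf = {
 *		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
 *		.memory = V4L2_MEMORY_USERPTR,
 *		.index = 0,
 *		.m.userptr = (unsigned long)my_frame,
 *		.length = my_frame_size,
 *	};
 *
 *	if (ioctl(fd, VIDIOC_PREPARE_BUF, &buf) == 0)
 *		ioctl(fd, VIDIOC_QBUF, &buf);
 */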
1581
1582/**
1583 * vb2_start_streaming() - Attempt to start streaming.
1584 * @q: videobuf2 queue
1585 *
1586 * If there are not enough buffers, then retry_start_streaming is set to
1587 * 1 and 0 is returned. The next time a buffer is queued and
1588 * retry_start_streaming is 1, this function will be called again to
1589 * retry starting the DMA engine.
1590 */
1591static int vb2_start_streaming(struct vb2_queue *q)
1592{
1593 int ret;
1594
1595 /* Tell the driver to start streaming */
1596 ret = call_qop(q, start_streaming, q, atomic_read(&q->owned_by_drv_count));
1597 if (ret)
1598 fail_qop(q, start_streaming);
1599
1600 /*
1601 * If there are not enough buffers queued to start streaming, then
1602 * the start_streaming operation will return -ENOBUFS and you have to
1603 * retry when the next buffer is queued.
1604 */
1605 if (ret == -ENOBUFS) {
1606 dprintk(1, "qbuf: not enough buffers, retry when more buffers are queued.\n");
1607 q->retry_start_streaming = 1;
1608 return 0;
1609 }
1610 if (ret)
1611 dprintk(1, "qbuf: driver refused to start streaming\n");
1612 else
1613 q->retry_start_streaming = 0;
1614 return ret;
1615}
1616
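/*
 * Illustrative sketch: a start_streaming implementation matching the retry
 * logic above. Returning -ENOBUFS when fewer buffers than the hardware
 * needs have been queued makes the core set retry_start_streaming and call
 * this again from a later qbuf. All "my_*"/"MY_*" names are hypothetical.
 */
static int my_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	if (count < MY_MIN_BUFFERS_NEEDED)
		return -ENOBUFS;	/* vb2 will retry on the next qbuf */

	dev->sequence = 0;
	return my_hw_start_dma(dev);	/* 0 on success, or a negative errno */
}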
1617static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1618{
1619 int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
1620 struct vb2_buffer *vb;
1621
1622 if (ret)
1623 return ret;
1624
1625 vb = q->bufs[b->index];
1626
1627 switch (vb->state) {
1628 case VB2_BUF_STATE_DEQUEUED:
1629 ret = __buf_prepare(vb, b);
1630 if (ret)
1631 return ret;
1632 break;
1633 case VB2_BUF_STATE_PREPARED:
1634 break;
1635 case VB2_BUF_STATE_PREPARING:
1636 dprintk(1, "qbuf: buffer still being prepared\n");
1637 return -EINVAL;
1638 default:
1639 dprintk(1, "%s(): invalid buffer state %d\n", __func__,
1640 vb->state);
1641 return -EINVAL;
1642 }
1643
1644 /*
1645 * Add to the queued buffers list, a buffer will stay on it until
1646 * dequeued in dqbuf.
1647 */
1648 list_add_tail(&vb->queued_entry, &q->queued_list);
1649 vb->state = VB2_BUF_STATE_QUEUED;
1650 if (V4L2_TYPE_IS_OUTPUT(q->type)) {
1651 /*
1652 * For output buffers copy the timestamp if needed,
1653 * and the timecode field and flag if needed.
1654 */
1655 if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
1656 V4L2_BUF_FLAG_TIMESTAMP_COPY)
1657 vb->v4l2_buf.timestamp = b->timestamp;
1658 vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
1659 if (b->flags & V4L2_BUF_FLAG_TIMECODE)
1660 vb->v4l2_buf.timecode = b->timecode;
1661 }
1662
1663 /*
1664 * If already streaming, give the buffer to driver for processing.
1665 * If not, the buffer will be given to driver on next streamon.
1666 */
1667 if (q->streaming)
1668 __enqueue_in_driver(vb);
1669
1670 /* Fill buffer information for the userspace */
1671 __fill_v4l2_buffer(vb, b);
1672
1673 if (q->retry_start_streaming) {
1674 ret = vb2_start_streaming(q);
1675 if (ret)
1676 return ret;
1677 }
1678
1679 dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
1680 return 0;
1681}
1682
1683/**
1684 * vb2_qbuf() - Queue a buffer from userspace
1685 * @q: videobuf2 queue
1686 * @b: buffer structure passed from userspace to vidioc_qbuf handler
1687 * in driver
1688 *
1689 * Should be called from vidioc_qbuf ioctl handler of a driver.
1690 * This function:
1691 * 1) verifies the passed buffer,
1692 * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
1693 * which driver-specific buffer initialization can be performed,
1694 * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
1695 * callback for processing.
1696 *
1697 * The return values from this function are intended to be directly returned
1698 * from vidioc_qbuf handler in driver.
1699 */
1700int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1701{
1702 if (q->fileio) {
1703 dprintk(1, "%s(): file io in progress\n", __func__);
1704 return -EBUSY;
1705 }
1706
1707 return vb2_internal_qbuf(q, b);
1708}
e23ccc0a
PO
1709EXPORT_SYMBOL_GPL(vb2_qbuf);
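/*
 * Illustrative sketch (not part of this file): a minimal vidioc_qbuf handler
 * that delegates to vb2_qbuf(). The "struct my_dev" with a "queue" member and
 * the use of video_drvdata() to reach it are assumptions made for this
 * example only.
 */
static int my_vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
	struct my_dev *dev = video_drvdata(file);

	/* vb2 verifies the buffer and, if streaming, hands it to the driver */
	return vb2_qbuf(&dev->queue, b);
}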
1710
1711/**
1712 * __vb2_wait_for_done_vb() - wait for a buffer to become available
1713 * for dequeuing
1714 *
 1715 * Will sleep if required when nonblocking == false.
1716 */
1717static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1718{
1719 /*
1720 * All operations on vb_done_list are performed under done_lock
1721 * spinlock protection. However, buffers may be removed from
1722 * it and returned to userspace only while holding both driver's
 1723 * lock and the done_lock spinlock. Thus we can be sure that as
 1724 * long as we hold the driver's lock, the list will not become
 1725 * empty once we have seen it as non-empty.
1726 */
1727
1728 for (;;) {
1729 int ret;
1730
1731 if (!q->streaming) {
1732 dprintk(1, "Streaming off, will not wait for buffers\n");
1733 return -EINVAL;
1734 }
1735
1736 if (!list_empty(&q->done_list)) {
1737 /*
1738 * Found a buffer that we were waiting for.
1739 */
1740 break;
1741 }
1742
1743 if (nonblocking) {
1744 dprintk(1, "Nonblocking and no buffers to dequeue, "
1745 "will not wait\n");
1746 return -EAGAIN;
1747 }
1748
1749 /*
1750 * We are streaming and blocking, wait for another buffer to
1751 * become ready or for streamoff. Driver's lock is released to
1752 * allow streamoff or qbuf to be called while waiting.
1753 */
1754 call_qop(q, wait_prepare, q);
1755
1756 /*
1757 * All locks have been released, it is safe to sleep now.
1758 */
1759 dprintk(3, "Will sleep waiting for buffers\n");
1760 ret = wait_event_interruptible(q->done_wq,
1761 !list_empty(&q->done_list) || !q->streaming);
1762
1763 /*
1764 * We need to reevaluate both conditions again after reacquiring
1765 * the locks or return an error if one occurred.
1766 */
1767 call_qop(q, wait_finish, q);
32a77260
HV
1768 if (ret) {
1769 dprintk(1, "Sleep was interrupted\n");
e23ccc0a 1770 return ret;
32a77260 1771 }
e23ccc0a
PO
1772 }
1773 return 0;
1774}
1775
1776/**
1777 * __vb2_get_done_vb() - get a buffer ready for dequeuing
1778 *
 1779 * Will sleep if required when nonblocking == false.
1780 */
1781static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
32a77260 1782 struct v4l2_buffer *b, int nonblocking)
e23ccc0a
PO
1783{
1784 unsigned long flags;
1785 int ret;
1786
1787 /*
1788 * Wait for at least one buffer to become available on the done_list.
1789 */
1790 ret = __vb2_wait_for_done_vb(q, nonblocking);
1791 if (ret)
1792 return ret;
1793
1794 /*
1795 * Driver's lock has been held since we last verified that done_list
1796 * is not empty, so no need for another list_empty(done_list) check.
1797 */
1798 spin_lock_irqsave(&q->done_lock, flags);
1799 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
32a77260
HV
1800 /*
1801 * Only remove the buffer from done_list if v4l2_buffer can handle all
1802 * the planes.
1803 */
1804 ret = __verify_planes_array(*vb, b);
1805 if (!ret)
1806 list_del(&(*vb)->done_entry);
e23ccc0a
PO
1807 spin_unlock_irqrestore(&q->done_lock, flags);
1808
32a77260 1809 return ret;
e23ccc0a
PO
1810}
1811
1812/**
1813 * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
1814 * @q: videobuf2 queue
1815 *
1816 * This function will wait until all buffers that have been given to the driver
 1817 * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call the
 1818 * wait_prepare/wait_finish pair. It is intended to be called with all locks
1819 * taken, for example from stop_streaming() callback.
1820 */
1821int vb2_wait_for_all_buffers(struct vb2_queue *q)
1822{
1823 if (!q->streaming) {
1824 dprintk(1, "Streaming off, will not wait for buffers\n");
1825 return -EINVAL;
1826 }
1827
02f142ec 1828 if (!q->retry_start_streaming)
6ea3b980 1829 wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
e23ccc0a
PO
1830 return 0;
1831}
1832EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
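/*
 * Illustrative sketch (not part of this file): a stop_streaming callback that
 * stops the (hypothetical) hardware and then uses vb2_wait_for_all_buffers()
 * to wait until every buffer owned by the driver has been returned with
 * vb2_buffer_done(), e.g. from the interrupt handler. struct my_dev and
 * my_hw_stop() are assumptions for this example; in this vb2 version
 * stop_streaming is assumed to return int.
 */
static int my_stop_streaming(struct vb2_queue *q)
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	/* Tell the hardware to stop DMA (hypothetical helper). */
	my_hw_stop(dev);

	/* All locks are held here, so the no-wait_prepare variant is fine. */
	vb2_wait_for_all_buffers(q);
	return 0;
}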
1833
c5384048
SS
1834/**
1835 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
1836 */
1837static void __vb2_dqbuf(struct vb2_buffer *vb)
1838{
1839 struct vb2_queue *q = vb->vb2_queue;
1840 unsigned int i;
1841
1842 /* nothing to do if the buffer is already dequeued */
1843 if (vb->state == VB2_BUF_STATE_DEQUEUED)
1844 return;
1845
1846 vb->state = VB2_BUF_STATE_DEQUEUED;
1847
1848 /* unmap DMABUF buffer */
1849 if (q->memory == V4L2_MEMORY_DMABUF)
1850 for (i = 0; i < vb->num_planes; ++i) {
1851 if (!vb->planes[i].dbuf_mapped)
1852 continue;
b5b4541e 1853 call_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
c5384048
SS
1854 vb->planes[i].dbuf_mapped = 0;
1855 }
1856}
1857
b2f2f047 1858static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
e23ccc0a
PO
1859{
1860 struct vb2_buffer *vb = NULL;
1861 int ret;
1862
1863 if (b->type != q->type) {
1864 dprintk(1, "dqbuf: invalid buffer type\n");
1865 return -EINVAL;
1866 }
32a77260
HV
1867 ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
1868 if (ret < 0)
e23ccc0a 1869 return ret;
e23ccc0a 1870
06470642 1871 call_vb_qop(vb, buf_finish, vb);
e23ccc0a
PO
1872
1873 switch (vb->state) {
1874 case VB2_BUF_STATE_DONE:
1875 dprintk(3, "dqbuf: Returning done buffer\n");
1876 break;
1877 case VB2_BUF_STATE_ERROR:
1878 dprintk(3, "dqbuf: Returning done buffer with errors\n");
1879 break;
1880 default:
1881 dprintk(1, "dqbuf: Invalid buffer state\n");
1882 return -EINVAL;
1883 }
1884
1885 /* Fill buffer information for the userspace */
1886 __fill_v4l2_buffer(vb, b);
1887 /* Remove from videobuf queue */
1888 list_del(&vb->queued_entry);
c5384048
SS
1889 /* go back to dequeued state */
1890 __vb2_dqbuf(vb);
e23ccc0a
PO
1891
1892 dprintk(1, "dqbuf of buffer %d, with state %d\n",
1893 vb->v4l2_buf.index, vb->state);
1894
e23ccc0a
PO
1895 return 0;
1896}
b2f2f047
HV
1897
1898/**
 1899 * vb2_dqbuf() - Dequeue a buffer to userspace
1900 * @q: videobuf2 queue
1901 * @b: buffer structure passed from userspace to vidioc_dqbuf handler
1902 * in driver
1903 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
1904 * buffers ready for dequeuing are present. Normally the driver
1905 * would be passing (file->f_flags & O_NONBLOCK) here
1906 *
1907 * Should be called from vidioc_dqbuf ioctl handler of a driver.
1908 * This function:
1909 * 1) verifies the passed buffer,
 1910 * 2) calls the buf_finish callback in the driver (if provided), in which the
 1911 * driver can perform any additional operations that may be required before
 1912 * returning the buffer to userspace, such as cache sync,
 1913 * 3) the buffer struct members are filled with relevant information for
 1914 * userspace.
1915 *
1916 * The return values from this function are intended to be directly returned
1917 * from vidioc_dqbuf handler in driver.
1918 */
1919int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
1920{
1921 if (q->fileio) {
1922 dprintk(1, "dqbuf: file io in progress\n");
1923 return -EBUSY;
1924 }
1925 return vb2_internal_dqbuf(q, b, nonblocking);
1926}
e23ccc0a
PO
1927EXPORT_SYMBOL_GPL(vb2_dqbuf);
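/*
 * Illustrative sketch (not part of this file): a vidioc_dqbuf handler passing
 * the file's O_NONBLOCK flag through, as suggested above. struct my_dev with
 * a "queue" member is an assumption for this example.
 */
static int my_vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
	struct my_dev *dev = video_drvdata(file);

	return vb2_dqbuf(&dev->queue, b, file->f_flags & O_NONBLOCK);
}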
1928
bd323e28
MS
1929/**
1930 * __vb2_queue_cancel() - cancel and stop (pause) streaming
1931 *
 1932 * Removes all queued buffers from the driver's queue and all buffers queued by
 1933 * userspace from videobuf's queue. Returns the queue to the state after reqbufs.
1934 */
1935static void __vb2_queue_cancel(struct vb2_queue *q)
1936{
1937 unsigned int i;
1938
02f142ec
HV
1939 if (q->retry_start_streaming) {
1940 q->retry_start_streaming = 0;
1941 q->streaming = 0;
1942 }
1943
bd323e28
MS
1944 /*
1945 * Tell driver to stop all transactions and release all queued
1946 * buffers.
1947 */
1948 if (q->streaming)
1949 call_qop(q, stop_streaming, q);
1950 q->streaming = 0;
1951
1952 /*
1953 * Remove all buffers from videobuf's list...
1954 */
1955 INIT_LIST_HEAD(&q->queued_list);
1956 /*
1957 * ...and done list; userspace will not receive any buffers it
1958 * has not already dequeued before initiating cancel.
1959 */
1960 INIT_LIST_HEAD(&q->done_list);
6ea3b980 1961 atomic_set(&q->owned_by_drv_count, 0);
bd323e28
MS
1962 wake_up_all(&q->done_wq);
1963
1964 /*
1965 * Reinitialize all buffers for next use.
9c0863b1
HV
1966 * Make sure to call buf_finish for any queued buffers. Normally
1967 * that's done in dqbuf, but that's not going to happen when we
1968 * cancel the whole queue. Note: this code belongs here, not in
1969 * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
1970 * call to __fill_v4l2_buffer() after buf_finish(). That order can't
1971 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
bd323e28 1972 */
9c0863b1
HV
1973 for (i = 0; i < q->num_buffers; ++i) {
1974 struct vb2_buffer *vb = q->bufs[i];
1975
1976 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1977 vb->state = VB2_BUF_STATE_PREPARED;
1978 call_vb_qop(vb, buf_finish, vb);
1979 }
1980 __vb2_dqbuf(vb);
1981 }
bd323e28
MS
1982}
1983
b2f2f047 1984static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
e23ccc0a
PO
1985{
1986 struct vb2_buffer *vb;
5db2c3ba 1987 int ret;
e23ccc0a
PO
1988
1989 if (type != q->type) {
1990 dprintk(1, "streamon: invalid stream type\n");
1991 return -EINVAL;
1992 }
1993
1994 if (q->streaming) {
f956035c
RR
1995 dprintk(3, "streamon successful: already streaming\n");
1996 return 0;
e23ccc0a
PO
1997 }
1998
548df783
RR
1999 if (!q->num_buffers) {
2000 dprintk(1, "streamon: no buffers have been allocated\n");
2001 return -EINVAL;
2002 }
2003
e23ccc0a 2009 /*
bd323e28
MS
2010 * If any buffers were queued before streamon,
2011 * we can now pass them to driver for processing.
e23ccc0a 2012 */
bd323e28
MS
2013 list_for_each_entry(vb, &q->queued_list, queued_entry)
2014 __enqueue_in_driver(vb);
e23ccc0a 2015
02f142ec
HV
2016 /* Tell driver to start streaming. */
2017 ret = vb2_start_streaming(q);
5db2c3ba 2018 if (ret) {
bd323e28 2019 __vb2_queue_cancel(q);
5db2c3ba
PO
2020 return ret;
2021 }
2022
2023 q->streaming = 1;
e23ccc0a 2024
e23ccc0a
PO
2025 dprintk(3, "Streamon successful\n");
2026 return 0;
2027}
e23ccc0a
PO
2028
2029/**
b2f2f047 2030 * vb2_streamon - start streaming
e23ccc0a 2031 * @q: videobuf2 queue
b2f2f047 2032 * @type: type argument passed from userspace to vidioc_streamon handler
e23ccc0a 2033 *
b2f2f047 2034 * Should be called from vidioc_streamon handler of a driver.
e23ccc0a 2035 * This function:
b2f2f047
HV
2036 * 1) verifies current state
2037 * 2) passes any previously queued buffers to the driver and starts streaming
e23ccc0a 2038 *
e23ccc0a 2039 * The return values from this function are intended to be directly returned
b2f2f047 2040 * from vidioc_streamon handler in the driver.
e23ccc0a 2041 */
b2f2f047 2042int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
e23ccc0a 2043{
b25748fe 2044 if (q->fileio) {
b2f2f047 2045 dprintk(1, "streamon: file io in progress\n");
b25748fe
MS
2046 return -EBUSY;
2047 }
b2f2f047
HV
2048 return vb2_internal_streamon(q, type);
2049}
2050EXPORT_SYMBOL_GPL(vb2_streamon);
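/*
 * Illustrative sketch (not part of this file): a vidioc_streamon handler.
 * struct my_dev with a "queue" member is an assumption for this example.
 */
static int my_vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type t)
{
	struct my_dev *dev = video_drvdata(file);

	/* vb2 passes any already-queued buffers to the driver and starts streaming */
	return vb2_streamon(&dev->queue, t);
}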
b25748fe 2051
b2f2f047
HV
2052static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2053{
e23ccc0a
PO
2054 if (type != q->type) {
2055 dprintk(1, "streamoff: invalid stream type\n");
2056 return -EINVAL;
2057 }
2058
2059 if (!q->streaming) {
f956035c
RR
2060 dprintk(3, "streamoff successful: not streaming\n");
2061 return 0;
e23ccc0a
PO
2062 }
2063
2064 /*
2065 * Cancel will pause streaming and remove all buffers from the driver
2066 * and videobuf, effectively returning control over them to userspace.
2067 */
2068 __vb2_queue_cancel(q);
2069
2070 dprintk(3, "Streamoff successful\n");
2071 return 0;
2072}
b2f2f047
HV
2073
2074/**
2075 * vb2_streamoff - stop streaming
2076 * @q: videobuf2 queue
2077 * @type: type argument passed from userspace to vidioc_streamoff handler
2078 *
2079 * Should be called from vidioc_streamoff handler of a driver.
2080 * This function:
2081 * 1) verifies current state,
 2082 * 2) stops streaming and dequeues any queued buffers, including those previously
2083 * passed to the driver (after waiting for the driver to finish).
2084 *
2085 * This call can be used for pausing playback.
2086 * The return values from this function are intended to be directly returned
2087 * from vidioc_streamoff handler in the driver
2088 */
2089int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2090{
2091 if (q->fileio) {
2092 dprintk(1, "streamoff: file io in progress\n");
2093 return -EBUSY;
2094 }
2095 return vb2_internal_streamoff(q, type);
2096}
e23ccc0a
PO
2097EXPORT_SYMBOL_GPL(vb2_streamoff);
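/*
 * Illustrative sketch (not part of this file): the matching vidioc_streamoff
 * handler, again assuming a hypothetical struct my_dev with a "queue" member.
 */
static int my_vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type t)
{
	struct my_dev *dev = video_drvdata(file);

	/* vb2 cancels the queue and returns all buffers to userspace control */
	return vb2_streamoff(&dev->queue, t);
}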
2098
2099/**
2100 * __find_plane_by_offset() - find plane associated with the given offset off
2101 */
2102static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
2103 unsigned int *_buffer, unsigned int *_plane)
2104{
2105 struct vb2_buffer *vb;
2106 unsigned int buffer, plane;
2107
2108 /*
2109 * Go over all buffers and their planes, comparing the given offset
2110 * with an offset assigned to each plane. If a match is found,
2111 * return its buffer and plane numbers.
2112 */
2113 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
2114 vb = q->bufs[buffer];
2115
2116 for (plane = 0; plane < vb->num_planes; ++plane) {
2117 if (vb->v4l2_planes[plane].m.mem_offset == off) {
2118 *_buffer = buffer;
2119 *_plane = plane;
2120 return 0;
2121 }
2122 }
2123 }
2124
2125 return -EINVAL;
2126}
2127
83ae7c5a
TS
2128/**
2129 * vb2_expbuf() - Export a buffer as a file descriptor
2130 * @q: videobuf2 queue
2131 * @eb: export buffer structure passed from userspace to vidioc_expbuf
2132 * handler in driver
2133 *
2134 * The return values from this function are intended to be directly returned
2135 * from vidioc_expbuf handler in driver.
2136 */
2137int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
2138{
2139 struct vb2_buffer *vb = NULL;
2140 struct vb2_plane *vb_plane;
2141 int ret;
2142 struct dma_buf *dbuf;
2143
2144 if (q->memory != V4L2_MEMORY_MMAP) {
2145 dprintk(1, "Queue is not currently set up for mmap\n");
2146 return -EINVAL;
2147 }
2148
2149 if (!q->mem_ops->get_dmabuf) {
2150 dprintk(1, "Queue does not support DMA buffer exporting\n");
2151 return -EINVAL;
2152 }
2153
ea3aba84
PZ
2154 if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
2155 dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n");
83ae7c5a
TS
2156 return -EINVAL;
2157 }
2158
2159 if (eb->type != q->type) {
 2160		dprintk(1, "expbuf: invalid buffer type\n");
2161 return -EINVAL;
2162 }
2163
2164 if (eb->index >= q->num_buffers) {
2165 dprintk(1, "buffer index out of range\n");
2166 return -EINVAL;
2167 }
2168
2169 vb = q->bufs[eb->index];
2170
2171 if (eb->plane >= vb->num_planes) {
2172 dprintk(1, "buffer plane out of range\n");
2173 return -EINVAL;
2174 }
2175
2176 vb_plane = &vb->planes[eb->plane];
2177
b5b4541e 2178 dbuf = call_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
83ae7c5a
TS
2179 if (IS_ERR_OR_NULL(dbuf)) {
2180 dprintk(1, "Failed to export buffer %d, plane %d\n",
2181 eb->index, eb->plane);
b5b4541e 2182 fail_memop(vb, get_dmabuf);
83ae7c5a
TS
2183 return -EINVAL;
2184 }
2185
ea3aba84 2186 ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
83ae7c5a
TS
2187 if (ret < 0) {
2188 dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
2189 eb->index, eb->plane, ret);
2190 dma_buf_put(dbuf);
2191 return ret;
2192 }
2193
 2194	dprintk(3, "buffer %d, plane %d exported as file descriptor %d\n",
2195 eb->index, eb->plane, ret);
2196 eb->fd = ret;
2197
2198 return 0;
2199}
2200EXPORT_SYMBOL_GPL(vb2_expbuf);
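/*
 * Illustrative sketch (not part of this file): a vidioc_expbuf handler that
 * exports an MMAP buffer plane as a DMABUF file descriptor. struct my_dev
 * with a "queue" member is an assumption for this example.
 */
static int my_vidioc_expbuf(struct file *file, void *priv,
			    struct v4l2_exportbuffer *eb)
{
	struct my_dev *dev = video_drvdata(file);

	/* Only works for V4L2_MEMORY_MMAP queues whose allocator has get_dmabuf */
	return vb2_expbuf(&dev->queue, eb);
}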
2201
e23ccc0a
PO
2202/**
2203 * vb2_mmap() - map video buffers into application address space
2204 * @q: videobuf2 queue
2205 * @vma: vma passed to the mmap file operation handler in the driver
2206 *
2207 * Should be called from mmap file operation handler of a driver.
2208 * This function maps one plane of one of the available video buffers to
 2209 * userspace. To map the whole video memory allocated with reqbufs, this function
 2210 * has to be called once per plane per previously allocated buffer.
 2211 *
 2212 * When the userspace application calls mmap, it passes an offset returned
 2213 * to it earlier by the vidioc_querybuf handler. That offset acts as
 2214 * a "cookie", which is then used to identify the plane to be mapped.
 2215 * This function finds the plane with a matching offset and the mapping is
 2216 * performed by means of the provided memory operation.
2217 *
2218 * The return values from this function are intended to be directly returned
2219 * from the mmap handler in driver.
2220 */
2221int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
2222{
2223 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
e23ccc0a
PO
2224 struct vb2_buffer *vb;
2225 unsigned int buffer, plane;
2226 int ret;
7f841459 2227 unsigned long length;
e23ccc0a
PO
2228
2229 if (q->memory != V4L2_MEMORY_MMAP) {
2230 dprintk(1, "Queue is not currently set up for mmap\n");
2231 return -EINVAL;
2232 }
2233
2234 /*
2235 * Check memory area access mode.
2236 */
2237 if (!(vma->vm_flags & VM_SHARED)) {
2238 dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
2239 return -EINVAL;
2240 }
2241 if (V4L2_TYPE_IS_OUTPUT(q->type)) {
2242 if (!(vma->vm_flags & VM_WRITE)) {
2243 dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
2244 return -EINVAL;
2245 }
2246 } else {
2247 if (!(vma->vm_flags & VM_READ)) {
2248 dprintk(1, "Invalid vma flags, VM_READ needed\n");
2249 return -EINVAL;
2250 }
2251 }
2252
2253 /*
2254 * Find the plane corresponding to the offset passed by userspace.
2255 */
2256 ret = __find_plane_by_offset(q, off, &buffer, &plane);
2257 if (ret)
2258 return ret;
2259
2260 vb = q->bufs[buffer];
e23ccc0a 2261
7f841459
MCC
2262 /*
2263 * MMAP requires page_aligned buffers.
2264 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
2265 * so, we need to do the same here.
2266 */
2267 length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
2268 if (length < (vma->vm_end - vma->vm_start)) {
2269 dprintk(1,
2270 "MMAP invalid, as it would overflow buffer length\n");
068a0df7
SWK
2271 return -EINVAL;
2272 }
2273
b5b4541e
HV
2274 ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
2275 if (ret) {
2276 fail_memop(vb, mmap);
e23ccc0a 2277 return ret;
b5b4541e 2278 }
e23ccc0a 2279
e23ccc0a
PO
2280 dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
2281 return 0;
2282}
2283EXPORT_SYMBOL_GPL(vb2_mmap);
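/*
 * Illustrative sketch (not part of this file): an mmap file operation that
 * serializes against other queue operations before calling vb2_mmap(). The
 * hypothetical struct my_dev is assumed to contain the vb2_queue and the
 * mutex that protects it.
 */
static int my_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = video_drvdata(file);
	int ret;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;
	ret = vb2_mmap(&dev->queue, vma);
	mutex_unlock(&dev->lock);
	return ret;
}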
2284
6f524ec1
SJ
2285#ifndef CONFIG_MMU
2286unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
2287 unsigned long addr,
2288 unsigned long len,
2289 unsigned long pgoff,
2290 unsigned long flags)
2291{
2292 unsigned long off = pgoff << PAGE_SHIFT;
2293 struct vb2_buffer *vb;
2294 unsigned int buffer, plane;
2295 int ret;
2296
2297 if (q->memory != V4L2_MEMORY_MMAP) {
2298 dprintk(1, "Queue is not currently set up for mmap\n");
2299 return -EINVAL;
2300 }
2301
2302 /*
2303 * Find the plane corresponding to the offset passed by userspace.
2304 */
2305 ret = __find_plane_by_offset(q, off, &buffer, &plane);
2306 if (ret)
2307 return ret;
2308
2309 vb = q->bufs[buffer];
2310
2311 return (unsigned long)vb2_plane_vaddr(vb, plane);
2312}
2313EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
2314#endif
2315
b25748fe
MS
2316static int __vb2_init_fileio(struct vb2_queue *q, int read);
2317static int __vb2_cleanup_fileio(struct vb2_queue *q);
e23ccc0a
PO
2318
2319/**
2320 * vb2_poll() - implements poll userspace operation
2321 * @q: videobuf2 queue
2322 * @file: file argument passed to the poll file operation handler
2323 * @wait: wait argument passed to the poll file operation handler
2324 *
2325 * This function implements poll file operation handler for a driver.
2326 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
2327 * be informed that the file descriptor of a video device is available for
2328 * reading.
2329 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
2330 * will be reported as available for writing.
2331 *
95213ceb
HV
2332 * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
2333 * pending events.
2334 *
e23ccc0a
PO
2335 * The return values from this function are intended to be directly returned
2336 * from poll handler in driver.
2337 */
2338unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
2339{
95213ceb 2340 struct video_device *vfd = video_devdata(file);
bf5c7cbb 2341 unsigned long req_events = poll_requested_events(wait);
e23ccc0a 2342 struct vb2_buffer *vb = NULL;
95213ceb
HV
2343 unsigned int res = 0;
2344 unsigned long flags;
2345
2346 if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
2347 struct v4l2_fh *fh = file->private_data;
2348
2349 if (v4l2_event_pending(fh))
2350 res = POLLPRI;
2351 else if (req_events & POLLPRI)
2352 poll_wait(file, &fh->wait, wait);
2353 }
e23ccc0a 2354
cd13823f
HV
2355 if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
2356 return res;
2357 if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
2358 return res;
2359
b25748fe 2360 /*
4ffabdb3 2361 * Start file I/O emulator only if streaming API has not been used yet.
b25748fe
MS
2362 */
2363 if (q->num_buffers == 0 && q->fileio == NULL) {
bf5c7cbb
HV
2364 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
2365 (req_events & (POLLIN | POLLRDNORM))) {
95213ceb
HV
2366 if (__vb2_init_fileio(q, 1))
2367 return res | POLLERR;
b25748fe 2368 }
bf5c7cbb
HV
2369 if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
2370 (req_events & (POLLOUT | POLLWRNORM))) {
95213ceb
HV
2371 if (__vb2_init_fileio(q, 0))
2372 return res | POLLERR;
b25748fe
MS
2373 /*
2374 * Write to OUTPUT queue can be done immediately.
2375 */
95213ceb 2376 return res | POLLOUT | POLLWRNORM;
b25748fe
MS
2377 }
2378 }
2379
e23ccc0a
PO
2380 /*
2381 * There is nothing to wait for if no buffers have already been queued.
2382 */
2383 if (list_empty(&q->queued_list))
95213ceb 2384 return res | POLLERR;
e23ccc0a 2385
412cb87d
SWK
2386 if (list_empty(&q->done_list))
2387 poll_wait(file, &q->done_wq, wait);
e23ccc0a
PO
2388
2389 /*
2390 * Take first buffer available for dequeuing.
2391 */
2392 spin_lock_irqsave(&q->done_lock, flags);
2393 if (!list_empty(&q->done_list))
2394 vb = list_first_entry(&q->done_list, struct vb2_buffer,
2395 done_entry);
2396 spin_unlock_irqrestore(&q->done_lock, flags);
2397
2398 if (vb && (vb->state == VB2_BUF_STATE_DONE
2399 || vb->state == VB2_BUF_STATE_ERROR)) {
95213ceb
HV
2400 return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
2401 res | POLLOUT | POLLWRNORM :
2402 res | POLLIN | POLLRDNORM;
e23ccc0a 2403 }
95213ceb 2404 return res;
e23ccc0a
PO
2405}
2406EXPORT_SYMBOL_GPL(vb2_poll);
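/*
 * Illustrative sketch (not part of this file): a poll file operation wrapping
 * vb2_poll() with the queue mutex held, since vb2_poll() may start the file
 * io emulator. struct my_dev with "queue" and "lock" members is an assumption
 * for this example; drivers can also use the vb2_fop_poll() helper below.
 */
static unsigned int my_fop_poll(struct file *file, poll_table *wait)
{
	struct my_dev *dev = video_drvdata(file);
	unsigned int res;

	mutex_lock(&dev->lock);
	res = vb2_poll(&dev->queue, file, wait);
	mutex_unlock(&dev->lock);
	return res;
}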
2407
2408/**
2409 * vb2_queue_init() - initialize a videobuf2 queue
2410 * @q: videobuf2 queue; this structure should be allocated in driver
2411 *
2412 * The vb2_queue structure should be allocated by the driver. The driver is
 2413 * responsible for clearing its content and setting initial values for some
2414 * required entries before calling this function.
2415 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
2416 * to the struct vb2_queue description in include/media/videobuf2-core.h
2417 * for more information.
2418 */
2419int vb2_queue_init(struct vb2_queue *q)
2420{
896f38f5
EG
2421 /*
2422 * Sanity check
2423 */
2424 if (WARN_ON(!q) ||
2425 WARN_ON(!q->ops) ||
2426 WARN_ON(!q->mem_ops) ||
2427 WARN_ON(!q->type) ||
2428 WARN_ON(!q->io_modes) ||
2429 WARN_ON(!q->ops->queue_setup) ||
6aa69f99 2430 WARN_ON(!q->ops->buf_queue) ||
872484ce
SA
2431 WARN_ON(q->timestamp_flags &
2432 ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
2433 V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
896f38f5 2434 return -EINVAL;
e23ccc0a 2435
6aa69f99 2436 /* Warn that the driver should choose an appropriate timestamp type */
c57ff792
SA
2437 WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
2438 V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
6aa69f99 2439
e23ccc0a
PO
2440 INIT_LIST_HEAD(&q->queued_list);
2441 INIT_LIST_HEAD(&q->done_list);
2442 spin_lock_init(&q->done_lock);
2443 init_waitqueue_head(&q->done_wq);
2444
2445 if (q->buf_struct_size == 0)
2446 q->buf_struct_size = sizeof(struct vb2_buffer);
2447
2448 return 0;
2449}
2450EXPORT_SYMBOL_GPL(vb2_queue_init);
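/*
 * Illustrative sketch (not part of this file): typical probe-time queue setup
 * before calling vb2_queue_init(). struct my_dev, struct my_buffer and
 * my_vb2_ops are assumptions for this example; the vmalloc allocator is just
 * one possible choice of mem_ops.
 */
static int my_init_queue(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct my_buffer);
	q->ops = &my_vb2_ops;		/* must provide queue_setup and buf_queue */
	q->mem_ops = &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &dev->lock;		/* used by the vb2_fop_ and vb2_ops_ helpers below */

	return vb2_queue_init(q);
}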
2451
2452/**
2453 * vb2_queue_release() - stop streaming, release the queue and free memory
2454 * @q: videobuf2 queue
2455 *
2456 * This function stops streaming and performs necessary clean ups, including
2457 * freeing video buffer memory. The driver is responsible for freeing
2458 * the vb2_queue structure itself.
2459 */
2460void vb2_queue_release(struct vb2_queue *q)
2461{
b25748fe 2462 __vb2_cleanup_fileio(q);
e23ccc0a 2463 __vb2_queue_cancel(q);
2d86401c 2464 __vb2_queue_free(q, q->num_buffers);
e23ccc0a
PO
2465}
2466EXPORT_SYMBOL_GPL(vb2_queue_release);
2467
b25748fe
MS
2468/**
2469 * struct vb2_fileio_buf - buffer context used by file io emulator
2470 *
2471 * vb2 provides a compatibility layer and emulator of file io (read and
 2472 * write) calls on top of the streaming API. This structure is used for
2473 * tracking context related to the buffers.
2474 */
2475struct vb2_fileio_buf {
2476 void *vaddr;
2477 unsigned int size;
2478 unsigned int pos;
2479 unsigned int queued:1;
2480};
2481
2482/**
2483 * struct vb2_fileio_data - queue context used by file io emulator
2484 *
4e5a4d8a
HV
2485 * @cur_index: the index of the buffer currently being read from or
2486 * written to. If equal to q->num_buffers then a new buffer
2487 * must be dequeued.
2488 * @initial_index: in the read() case all buffers are queued up immediately
2489 * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
2490 * buffers. However, in the write() case no buffers are initially
2491 * queued, instead whenever a buffer is full it is queued up by
2492 * __vb2_perform_fileio(). Only once all available buffers have
2493 * been queued up will __vb2_perform_fileio() start to dequeue
2494 * buffers. This means that initially __vb2_perform_fileio()
2495 * needs to know what buffer index to use when it is queuing up
2496 * the buffers for the first time. That initial index is stored
2497 * in this field. Once it is equal to q->num_buffers all
2498 * available buffers have been queued and __vb2_perform_fileio()
2499 * should start the normal dequeue/queue cycle.
2500 *
b25748fe
MS
2501 * vb2 provides a compatibility layer and emulator of file io (read and
 2502 * write) calls on top of the streaming API. For proper operation it requires
 2503 * this structure to save the driver state between each call of the read
 2504 * or write function.
2505 */
2506struct vb2_fileio_data {
2507 struct v4l2_requestbuffers req;
2508 struct v4l2_buffer b;
2509 struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
4e5a4d8a
HV
2510 unsigned int cur_index;
2511 unsigned int initial_index;
b25748fe
MS
2512 unsigned int q_count;
2513 unsigned int dq_count;
2514 unsigned int flags;
2515};
2516
2517/**
2518 * __vb2_init_fileio() - initialize file io emulator
2519 * @q: videobuf2 queue
2520 * @read: mode selector (1 means read, 0 means write)
2521 */
2522static int __vb2_init_fileio(struct vb2_queue *q, int read)
2523{
2524 struct vb2_fileio_data *fileio;
2525 int i, ret;
2526 unsigned int count = 0;
2527
2528 /*
2529 * Sanity check
2530 */
2531 if ((read && !(q->io_modes & VB2_READ)) ||
2532 (!read && !(q->io_modes & VB2_WRITE)))
2533 BUG();
2534
2535 /*
2536 * Check if device supports mapping buffers to kernel virtual space.
2537 */
2538 if (!q->mem_ops->vaddr)
2539 return -EBUSY;
2540
2541 /*
2542 * Check if streaming api has not been already activated.
2543 */
2544 if (q->streaming || q->num_buffers > 0)
2545 return -EBUSY;
2546
2547 /*
2548 * Start with count 1, driver can increase it in queue_setup()
2549 */
2550 count = 1;
2551
2552 dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
2553 (read) ? "read" : "write", count, q->io_flags);
2554
2555 fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
2556 if (fileio == NULL)
2557 return -ENOMEM;
2558
2559 fileio->flags = q->io_flags;
2560
2561 /*
2562 * Request buffers and use MMAP type to force driver
2563 * to allocate buffers by itself.
2564 */
2565 fileio->req.count = count;
2566 fileio->req.memory = V4L2_MEMORY_MMAP;
2567 fileio->req.type = q->type;
2568 ret = vb2_reqbufs(q, &fileio->req);
2569 if (ret)
2570 goto err_kfree;
2571
2572 /*
2573 * Check if plane_count is correct
2574 * (multiplane buffers are not supported).
2575 */
2576 if (q->bufs[0]->num_planes != 1) {
b25748fe
MS
2577 ret = -EBUSY;
2578 goto err_reqbufs;
2579 }
2580
2581 /*
2582 * Get kernel address of each buffer.
2583 */
2584 for (i = 0; i < q->num_buffers; i++) {
2585 fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
5dd6946c
WY
2586 if (fileio->bufs[i].vaddr == NULL) {
2587 ret = -EINVAL;
b25748fe 2588 goto err_reqbufs;
5dd6946c 2589 }
b25748fe
MS
2590 fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
2591 }
2592
2593 /*
2594 * Read mode requires pre queuing of all buffers.
2595 */
2596 if (read) {
2597 /*
2598 * Queue all buffers.
2599 */
2600 for (i = 0; i < q->num_buffers; i++) {
2601 struct v4l2_buffer *b = &fileio->b;
2602 memset(b, 0, sizeof(*b));
2603 b->type = q->type;
2604 b->memory = q->memory;
2605 b->index = i;
2606 ret = vb2_qbuf(q, b);
2607 if (ret)
2608 goto err_reqbufs;
2609 fileio->bufs[i].queued = 1;
2610 }
4e5a4d8a
HV
2611 /*
2612 * All buffers have been queued, so mark that by setting
2613 * initial_index to q->num_buffers
2614 */
2615 fileio->initial_index = q->num_buffers;
2616 fileio->cur_index = q->num_buffers;
b25748fe
MS
2617 }
2618
02f142ec
HV
2619 /*
2620 * Start streaming.
2621 */
2622 ret = vb2_streamon(q, q->type);
2623 if (ret)
2624 goto err_reqbufs;
2625
b25748fe
MS
2626 q->fileio = fileio;
2627
2628 return ret;
2629
2630err_reqbufs:
a67e1722 2631 fileio->req.count = 0;
b25748fe
MS
2632 vb2_reqbufs(q, &fileio->req);
2633
2634err_kfree:
2635 kfree(fileio);
2636 return ret;
2637}
2638
2639/**
 2640 * __vb2_cleanup_fileio() - free resources used by the file io emulator
2641 * @q: videobuf2 queue
2642 */
2643static int __vb2_cleanup_fileio(struct vb2_queue *q)
2644{
2645 struct vb2_fileio_data *fileio = q->fileio;
2646
2647 if (fileio) {
b2f2f047 2648 vb2_internal_streamoff(q, q->type);
b25748fe 2649 q->fileio = NULL;
b25748fe
MS
2650 fileio->req.count = 0;
2651 vb2_reqbufs(q, &fileio->req);
2652 kfree(fileio);
2653 dprintk(3, "file io emulator closed\n");
2654 }
2655 return 0;
2656}
2657
2658/**
2659 * __vb2_perform_fileio() - perform a single file io (read or write) operation
2660 * @q: videobuf2 queue
 2661 * @data: pointer to the target userspace buffer
2662 * @count: number of bytes to read or write
2663 * @ppos: file handle position tracking pointer
 2664 * @nonblock: mode selector (1 means nonblocking, 0 means blocking)
2665 * @read: access mode selector (1 means read, 0 means write)
2666 */
2667static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
2668 loff_t *ppos, int nonblock, int read)
2669{
2670 struct vb2_fileio_data *fileio;
2671 struct vb2_fileio_buf *buf;
2672 int ret, index;
2673
08b99e26 2674 dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
b25748fe
MS
2675 read ? "read" : "write", (long)*ppos, count,
2676 nonblock ? "non" : "");
2677
2678 if (!data)
2679 return -EINVAL;
2680
2681 /*
2682 * Initialize emulator on first call.
2683 */
2684 if (!q->fileio) {
2685 ret = __vb2_init_fileio(q, read);
2686 dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
2687 if (ret)
2688 return ret;
2689 }
2690 fileio = q->fileio;
2691
b25748fe
MS
2692 /*
2693 * Check if we need to dequeue the buffer.
2694 */
4e5a4d8a 2695 index = fileio->cur_index;
88e26870 2696 if (index >= q->num_buffers) {
b25748fe
MS
2697 /*
2698 * Call vb2_dqbuf to get buffer back.
2699 */
2700 memset(&fileio->b, 0, sizeof(fileio->b));
2701 fileio->b.type = q->type;
2702 fileio->b.memory = q->memory;
b2f2f047 2703 ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
b25748fe
MS
2704 dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
2705 if (ret)
b2f2f047 2706 return ret;
b25748fe
MS
2707 fileio->dq_count += 1;
2708
4e5a4d8a 2709 fileio->cur_index = index = fileio->b.index;
88e26870
HV
2710 buf = &fileio->bufs[index];
2711
b25748fe
MS
2712 /*
2713 * Get number of bytes filled by the driver
2714 */
88e26870 2715 buf->pos = 0;
b25748fe 2716 buf->queued = 0;
88e26870
HV
2717 buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
2718 : vb2_plane_size(q->bufs[index], 0);
2719 } else {
2720 buf = &fileio->bufs[index];
b25748fe
MS
2721 }
2722
2723 /*
2724 * Limit count on last few bytes of the buffer.
2725 */
2726 if (buf->pos + count > buf->size) {
2727 count = buf->size - buf->pos;
08b99e26 2728 dprintk(5, "reducing read count: %zd\n", count);
b25748fe
MS
2729 }
2730
2731 /*
2732 * Transfer data to userspace.
2733 */
08b99e26 2734 dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
b25748fe
MS
2735 count, index, buf->pos);
2736 if (read)
2737 ret = copy_to_user(data, buf->vaddr + buf->pos, count);
2738 else
2739 ret = copy_from_user(buf->vaddr + buf->pos, data, count);
2740 if (ret) {
2741 dprintk(3, "file io: error copying data\n");
b2f2f047 2742 return -EFAULT;
b25748fe
MS
2743 }
2744
2745 /*
2746 * Update counters.
2747 */
2748 buf->pos += count;
2749 *ppos += count;
2750
2751 /*
2752 * Queue next buffer if required.
2753 */
2754 if (buf->pos == buf->size ||
2755 (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
2756 /*
2757 * Check if this is the last buffer to read.
2758 */
2759 if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
2760 fileio->dq_count == 1) {
2761 dprintk(3, "file io: read limit reached\n");
b25748fe
MS
2762 return __vb2_cleanup_fileio(q);
2763 }
2764
2765 /*
2766 * Call vb2_qbuf and give buffer to the driver.
2767 */
2768 memset(&fileio->b, 0, sizeof(fileio->b));
2769 fileio->b.type = q->type;
2770 fileio->b.memory = q->memory;
2771 fileio->b.index = index;
2772 fileio->b.bytesused = buf->pos;
b2f2f047 2773 ret = vb2_internal_qbuf(q, &fileio->b);
b25748fe
MS
 2774		dprintk(5, "file io: vb2_qbuf result: %d\n", ret);
2775 if (ret)
b2f2f047 2776 return ret;
b25748fe
MS
2777
2778 /*
2779 * Buffer has been queued, update the status
2780 */
2781 buf->pos = 0;
2782 buf->queued = 1;
88e26870 2783 buf->size = vb2_plane_size(q->bufs[index], 0);
b25748fe 2784 fileio->q_count += 1;
4e5a4d8a
HV
2785 /*
2786 * If we are queuing up buffers for the first time, then
2787 * increase initial_index by one.
2788 */
2789 if (fileio->initial_index < q->num_buffers)
2790 fileio->initial_index++;
2791 /*
2792 * The next buffer to use is either a buffer that's going to be
2793 * queued for the first time (initial_index < q->num_buffers)
2794 * or it is equal to q->num_buffers, meaning that the next
2795 * time we need to dequeue a buffer since we've now queued up
2796 * all the 'first time' buffers.
2797 */
2798 fileio->cur_index = fileio->initial_index;
b25748fe
MS
2799 }
2800
2801 /*
2802 * Return proper number of bytes processed.
2803 */
2804 if (ret == 0)
2805 ret = count;
b25748fe
MS
2806 return ret;
2807}
2808
2809size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
2810 loff_t *ppos, int nonblocking)
2811{
2812 return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
2813}
2814EXPORT_SYMBOL_GPL(vb2_read);
2815
819585bc 2816size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
b25748fe
MS
2817 loff_t *ppos, int nonblocking)
2818{
819585bc
RR
2819 return __vb2_perform_fileio(q, (char __user *) data, count,
2820 ppos, nonblocking, 0);
b25748fe
MS
2821}
2822EXPORT_SYMBOL_GPL(vb2_write);
2823
4c1ffcaa
HV
2824
2825/*
2826 * The following functions are not part of the vb2 core API, but are helper
2827 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
2828 * and struct vb2_ops.
2829 * They contain boilerplate code that most if not all drivers have to do
2830 * and so they simplify the driver code.
2831 */
2832
2833/* The queue is busy if there is an owner and you are not that owner. */
2834static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
2835{
2836 return vdev->queue->owner && vdev->queue->owner != file->private_data;
2837}
2838
2839/* vb2 ioctl helpers */
2840
2841int vb2_ioctl_reqbufs(struct file *file, void *priv,
2842 struct v4l2_requestbuffers *p)
2843{
2844 struct video_device *vdev = video_devdata(file);
2845 int res = __verify_memory_type(vdev->queue, p->memory, p->type);
2846
2847 if (res)
2848 return res;
2849 if (vb2_queue_is_busy(vdev, file))
2850 return -EBUSY;
2851 res = __reqbufs(vdev->queue, p);
 2852	/* If count == 0, then the owner has released all buffers and is
 2853	   no longer the owner of the queue. Otherwise we have a new owner. */
2854 if (res == 0)
2855 vdev->queue->owner = p->count ? file->private_data : NULL;
2856 return res;
2857}
2858EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
2859
2860int vb2_ioctl_create_bufs(struct file *file, void *priv,
2861 struct v4l2_create_buffers *p)
2862{
2863 struct video_device *vdev = video_devdata(file);
2864 int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
2865
2866 p->index = vdev->queue->num_buffers;
2867 /* If count == 0, then just check if memory and type are valid.
2868 Any -EBUSY result from __verify_memory_type can be mapped to 0. */
2869 if (p->count == 0)
2870 return res != -EBUSY ? res : 0;
2871 if (res)
2872 return res;
2873 if (vb2_queue_is_busy(vdev, file))
2874 return -EBUSY;
2875 res = __create_bufs(vdev->queue, p);
2876 if (res == 0)
2877 vdev->queue->owner = file->private_data;
2878 return res;
2879}
2880EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
2881
2882int vb2_ioctl_prepare_buf(struct file *file, void *priv,
2883 struct v4l2_buffer *p)
2884{
2885 struct video_device *vdev = video_devdata(file);
2886
2887 if (vb2_queue_is_busy(vdev, file))
2888 return -EBUSY;
2889 return vb2_prepare_buf(vdev->queue, p);
2890}
2891EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
2892
2893int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
2894{
2895 struct video_device *vdev = video_devdata(file);
2896
2897 /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
2898 return vb2_querybuf(vdev->queue, p);
2899}
2900EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
2901
2902int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
2903{
2904 struct video_device *vdev = video_devdata(file);
2905
2906 if (vb2_queue_is_busy(vdev, file))
2907 return -EBUSY;
2908 return vb2_qbuf(vdev->queue, p);
2909}
2910EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
2911
2912int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
2913{
2914 struct video_device *vdev = video_devdata(file);
2915
2916 if (vb2_queue_is_busy(vdev, file))
2917 return -EBUSY;
2918 return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
2919}
2920EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
2921
2922int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
2923{
2924 struct video_device *vdev = video_devdata(file);
2925
2926 if (vb2_queue_is_busy(vdev, file))
2927 return -EBUSY;
2928 return vb2_streamon(vdev->queue, i);
2929}
2930EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
2931
2932int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
2933{
2934 struct video_device *vdev = video_devdata(file);
2935
2936 if (vb2_queue_is_busy(vdev, file))
2937 return -EBUSY;
2938 return vb2_streamoff(vdev->queue, i);
2939}
2940EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
2941
83ae7c5a
TS
2942int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
2943{
2944 struct video_device *vdev = video_devdata(file);
2945
2946 if (vb2_queue_is_busy(vdev, file))
2947 return -EBUSY;
2948 return vb2_expbuf(vdev->queue, p);
2949}
2950EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
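/*
 * Illustrative sketch (not part of this file): wiring the vb2_ioctl_* helpers
 * into a driver's v4l2_ioctl_ops. This assumes the driver has set
 * video_device->queue (and the queue or video_device lock), which is what
 * these helpers rely on.
 */
static const struct v4l2_ioctl_ops my_ioctl_ops = {
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_expbuf		= vb2_ioctl_expbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
	/* format handling and other ioctls remain driver specific */
};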
2951
4c1ffcaa
HV
2952/* v4l2_file_operations helpers */
2953
2954int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
2955{
2956 struct video_device *vdev = video_devdata(file);
8a90f1a6
LP
2957 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
2958 int err;
4c1ffcaa 2959
8a90f1a6
LP
2960 if (lock && mutex_lock_interruptible(lock))
2961 return -ERESTARTSYS;
2962 err = vb2_mmap(vdev->queue, vma);
2963 if (lock)
2964 mutex_unlock(lock);
2965 return err;
4c1ffcaa
HV
2966}
2967EXPORT_SYMBOL_GPL(vb2_fop_mmap);
2968
1380f575 2969int _vb2_fop_release(struct file *file, struct mutex *lock)
4c1ffcaa
HV
2970{
2971 struct video_device *vdev = video_devdata(file);
2972
2973 if (file->private_data == vdev->queue->owner) {
1380f575
RR
2974 if (lock)
2975 mutex_lock(lock);
4c1ffcaa
HV
2976 vb2_queue_release(vdev->queue);
2977 vdev->queue->owner = NULL;
1380f575
RR
2978 if (lock)
2979 mutex_unlock(lock);
4c1ffcaa
HV
2980 }
2981 return v4l2_fh_release(file);
2982}
1380f575
RR
2983EXPORT_SYMBOL_GPL(_vb2_fop_release);
2984
2985int vb2_fop_release(struct file *file)
2986{
2987 struct video_device *vdev = video_devdata(file);
2988 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
2989
2990 return _vb2_fop_release(file, lock);
2991}
4c1ffcaa
HV
2992EXPORT_SYMBOL_GPL(vb2_fop_release);
2993
819585bc 2994ssize_t vb2_fop_write(struct file *file, const char __user *buf,
4c1ffcaa
HV
2995 size_t count, loff_t *ppos)
2996{
2997 struct video_device *vdev = video_devdata(file);
2998 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
4c1ffcaa
HV
2999 int err = -EBUSY;
3000
cf533735 3001 if (lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
3002 return -ERESTARTSYS;
3003 if (vb2_queue_is_busy(vdev, file))
3004 goto exit;
3005 err = vb2_write(vdev->queue, buf, count, ppos,
3006 file->f_flags & O_NONBLOCK);
8c82c75c 3007 if (vdev->queue->fileio)
4c1ffcaa
HV
3008 vdev->queue->owner = file->private_data;
3009exit:
cf533735 3010 if (lock)
4c1ffcaa
HV
3011 mutex_unlock(lock);
3012 return err;
3013}
3014EXPORT_SYMBOL_GPL(vb2_fop_write);
3015
3016ssize_t vb2_fop_read(struct file *file, char __user *buf,
3017 size_t count, loff_t *ppos)
3018{
3019 struct video_device *vdev = video_devdata(file);
3020 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
4c1ffcaa
HV
3021 int err = -EBUSY;
3022
cf533735 3023 if (lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
3024 return -ERESTARTSYS;
3025 if (vb2_queue_is_busy(vdev, file))
3026 goto exit;
3027 err = vb2_read(vdev->queue, buf, count, ppos,
3028 file->f_flags & O_NONBLOCK);
8c82c75c 3029 if (vdev->queue->fileio)
4c1ffcaa
HV
3030 vdev->queue->owner = file->private_data;
3031exit:
cf533735 3032 if (lock)
4c1ffcaa
HV
3033 mutex_unlock(lock);
3034 return err;
3035}
3036EXPORT_SYMBOL_GPL(vb2_fop_read);
3037
3038unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
3039{
3040 struct video_device *vdev = video_devdata(file);
3041 struct vb2_queue *q = vdev->queue;
3042 struct mutex *lock = q->lock ? q->lock : vdev->lock;
3043 unsigned long req_events = poll_requested_events(wait);
3044 unsigned res;
3045 void *fileio;
4c1ffcaa
HV
3046 bool must_lock = false;
3047
3048 /* Try to be smart: only lock if polling might start fileio,
3049 otherwise locking will only introduce unwanted delays. */
3050 if (q->num_buffers == 0 && q->fileio == NULL) {
3051 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
3052 (req_events & (POLLIN | POLLRDNORM)))
3053 must_lock = true;
3054 else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
3055 (req_events & (POLLOUT | POLLWRNORM)))
3056 must_lock = true;
3057 }
3058
3059 /* If locking is needed, but this helper doesn't know how, then you
3060 shouldn't be using this helper but you should write your own. */
cf533735 3061 WARN_ON(must_lock && !lock);
4c1ffcaa 3062
cf533735 3063 if (must_lock && lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
3064 return POLLERR;
3065
3066 fileio = q->fileio;
3067
3068 res = vb2_poll(vdev->queue, file, wait);
3069
3070 /* If fileio was started, then we have a new queue owner. */
3071 if (must_lock && !fileio && q->fileio)
3072 q->owner = file->private_data;
cf533735 3073 if (must_lock && lock)
4c1ffcaa
HV
3074 mutex_unlock(lock);
3075 return res;
3076}
3077EXPORT_SYMBOL_GPL(vb2_fop_poll);
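/*
 * Illustrative sketch (not part of this file): a file_operations table built
 * from the vb2_fop_* helpers. It assumes video_device->queue is set and that
 * the driver uses v4l2_fh; v4l2_fh_open and video_ioctl2 are the standard
 * V4L2 entry points, not something defined in this file.
 */
static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.write		= vb2_fop_write,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};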
3078
3079#ifndef CONFIG_MMU
3080unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
3081 unsigned long len, unsigned long pgoff, unsigned long flags)
3082{
3083 struct video_device *vdev = video_devdata(file);
8a90f1a6
LP
3084 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3085 int ret;
4c1ffcaa 3086
8a90f1a6
LP
3087 if (lock && mutex_lock_interruptible(lock))
3088 return -ERESTARTSYS;
3089 ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
3090 if (lock)
3091 mutex_unlock(lock);
3092 return ret;
4c1ffcaa
HV
3093}
3094EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
3095#endif
3096
3097/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
3098
3099void vb2_ops_wait_prepare(struct vb2_queue *vq)
3100{
3101 mutex_unlock(vq->lock);
3102}
3103EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
3104
3105void vb2_ops_wait_finish(struct vb2_queue *vq)
3106{
3107 mutex_lock(vq->lock);
3108}
3109EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
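/*
 * Illustrative sketch (not part of this file): a vb2_ops table that plugs in
 * the wait_prepare/wait_finish helpers above. The my_* callbacks are
 * hypothetical driver implementations; only queue_setup and buf_queue are
 * mandatory, and vq->lock must be set for these two helpers to be usable.
 */
static const struct vb2_ops my_vb2_ops = {
	.queue_setup		= my_queue_setup,
	.buf_prepare		= my_buf_prepare,
	.buf_queue		= my_buf_queue,
	.start_streaming	= my_start_streaming,
	.stop_streaming		= my_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};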
3110
e23ccc0a 3111MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
95072084 3112MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
e23ccc0a 3113MODULE_LICENSE("GPL");