[media] vb2: if bytesused is 0, then fill with output buffer length
[deliverable/linux.git] / drivers / media / v4l2-core / videobuf2-core.c
CommitLineData
e23ccc0a
PO
1/*
2 * videobuf2-core.c - V4L2 driver helper framework
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
95072084 6 * Author: Pawel Osciak <pawel@osciak.com>
e23ccc0a
PO
7 * Marek Szyprowski <m.szyprowski@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation.
12 */
13
14#include <linux/err.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mm.h>
18#include <linux/poll.h>
19#include <linux/slab.h>
20#include <linux/sched.h>
21
95213ceb
HV
22#include <media/v4l2-dev.h>
23#include <media/v4l2-fh.h>
24#include <media/v4l2-event.h>
e23ccc0a
PO
25#include <media/videobuf2-core.h>
26
27static int debug;
28module_param(debug, int, 0644);
29
30#define dprintk(level, fmt, arg...) \
31 do { \
32 if (debug >= level) \
33 printk(KERN_DEBUG "vb2: " fmt, ## arg); \
34 } while (0)
35
b5b4541e
HV
#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

#define log_memop(vb, op)						\
	dprintk(2, "call_memop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

/* mem_ops wrapper for ops returning an int error code. */
#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

/* mem_ops wrapper for ops returning a pointer (NULL/ERR_PTR on failure). */
#define call_ptr_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

/* mem_ops wrapper for void ops; always counted as successful. */
#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

#define log_qop(q, op)							\
	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,			\
		(q)->ops->op ? "" : " (nop)")

/* Per-queue ops wrapper for ops returning an int error code. */
#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

/* Per-queue ops wrapper for void ops; always counted as successful. */
#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

#define log_vb_qop(vb, op, args...)					\
	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

/* Per-buffer queue-ops wrapper for ops returning an int error code. */
#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

/* Per-buffer queue-ops wrapper for void ops; always counted as successful. */
#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

/*
 * Without CONFIG_VIDEO_ADV_DEBUG the wrappers reduce to plain
 * "call the op if it is provided" expressions with no logging
 * and no call counting.
 */

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif
e23ccc0a 168
/* Flags that are set by the vb2 core */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)

static void __vb2_queue_cancel(struct vb2_queue *q);
e23ccc0a
PO
180/**
181 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
182 */
c1426bc7 183static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
e23ccc0a
PO
184{
185 struct vb2_queue *q = vb->vb2_queue;
186 void *mem_priv;
187 int plane;
188
7f841459
MCC
189 /*
190 * Allocate memory for all planes in this buffer
191 * NOTE: mmapped areas should be page aligned
192 */
e23ccc0a 193 for (plane = 0; plane < vb->num_planes; ++plane) {
7f841459
MCC
194 unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
195
a1d36d8c 196 mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
7f841459 197 size, q->gfp_flags);
62a79436 198 if (IS_ERR_OR_NULL(mem_priv))
e23ccc0a
PO
199 goto free;
200
201 /* Associate allocator private data with this plane */
202 vb->planes[plane].mem_priv = mem_priv;
c1426bc7 203 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
e23ccc0a
PO
204 }
205
206 return 0;
207free:
208 /* Free already allocated memory if one of the allocations failed */
a00d0266 209 for (; plane > 0; --plane) {
a1d36d8c 210 call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
a00d0266
MS
211 vb->planes[plane - 1].mem_priv = NULL;
212 }
e23ccc0a
PO
213
214 return -ENOMEM;
215}
216
217/**
218 * __vb2_buf_mem_free() - free memory of the given buffer
219 */
220static void __vb2_buf_mem_free(struct vb2_buffer *vb)
221{
e23ccc0a
PO
222 unsigned int plane;
223
224 for (plane = 0; plane < vb->num_planes; ++plane) {
a1d36d8c 225 call_void_memop(vb, put, vb->planes[plane].mem_priv);
e23ccc0a 226 vb->planes[plane].mem_priv = NULL;
a00d0266
MS
227 dprintk(3, "Freed plane %d of buffer %d\n", plane,
228 vb->v4l2_buf.index);
e23ccc0a
PO
229 }
230}
231
232/**
233 * __vb2_buf_userptr_put() - release userspace memory associated with
234 * a USERPTR buffer
235 */
236static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
237{
e23ccc0a
PO
238 unsigned int plane;
239
240 for (plane = 0; plane < vb->num_planes; ++plane) {
a00d0266 241 if (vb->planes[plane].mem_priv)
a1d36d8c 242 call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
a00d0266 243 vb->planes[plane].mem_priv = NULL;
e23ccc0a
PO
244 }
245}
246
c5384048
SS
247/**
248 * __vb2_plane_dmabuf_put() - release memory associated with
249 * a DMABUF shared plane
250 */
b5b4541e 251static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
c5384048
SS
252{
253 if (!p->mem_priv)
254 return;
255
256 if (p->dbuf_mapped)
a1d36d8c 257 call_void_memop(vb, unmap_dmabuf, p->mem_priv);
c5384048 258
a1d36d8c 259 call_void_memop(vb, detach_dmabuf, p->mem_priv);
c5384048
SS
260 dma_buf_put(p->dbuf);
261 memset(p, 0, sizeof(*p));
262}
263
264/**
265 * __vb2_buf_dmabuf_put() - release memory associated with
266 * a DMABUF shared buffer
267 */
268static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
269{
c5384048
SS
270 unsigned int plane;
271
272 for (plane = 0; plane < vb->num_planes; ++plane)
b5b4541e 273 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
c5384048
SS
274}
275
a5e3d743
HV
276/**
277 * __setup_lengths() - setup initial lengths for every plane in
278 * every buffer on the queue
279 */
280static void __setup_lengths(struct vb2_queue *q, unsigned int n)
281{
282 unsigned int buffer, plane;
283 struct vb2_buffer *vb;
284
285 for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
286 vb = q->bufs[buffer];
287 if (!vb)
288 continue;
289
290 for (plane = 0; plane < vb->num_planes; ++plane)
291 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
292 }
293}
294
e23ccc0a
PO
295/**
296 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
297 * every buffer on the queue
298 */
2d86401c 299static void __setup_offsets(struct vb2_queue *q, unsigned int n)
e23ccc0a
PO
300{
301 unsigned int buffer, plane;
302 struct vb2_buffer *vb;
2d86401c 303 unsigned long off;
e23ccc0a 304
2d86401c
GL
305 if (q->num_buffers) {
306 struct v4l2_plane *p;
307 vb = q->bufs[q->num_buffers - 1];
308 p = &vb->v4l2_planes[vb->num_planes - 1];
309 off = PAGE_ALIGN(p->m.mem_offset + p->length);
310 } else {
311 off = 0;
312 }
313
314 for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
e23ccc0a
PO
315 vb = q->bufs[buffer];
316 if (!vb)
317 continue;
318
319 for (plane = 0; plane < vb->num_planes; ++plane) {
320 vb->v4l2_planes[plane].m.mem_offset = off;
321
322 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
323 buffer, plane, off);
324
325 off += vb->v4l2_planes[plane].length;
326 off = PAGE_ALIGN(off);
327 }
328 }
329}
330
331/**
332 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
333 * video buffer memory for all buffers/planes on the queue and initializes the
334 * queue
335 *
336 * Returns the number of buffers successfully allocated.
337 */
338static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
c1426bc7 339 unsigned int num_buffers, unsigned int num_planes)
e23ccc0a
PO
340{
341 unsigned int buffer;
342 struct vb2_buffer *vb;
343 int ret;
344
345 for (buffer = 0; buffer < num_buffers; ++buffer) {
346 /* Allocate videobuf buffer structures */
347 vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
348 if (!vb) {
349 dprintk(1, "Memory alloc for buffer struct failed\n");
350 break;
351 }
352
353 /* Length stores number of planes for multiplanar buffers */
354 if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
355 vb->v4l2_buf.length = num_planes;
356
357 vb->state = VB2_BUF_STATE_DEQUEUED;
358 vb->vb2_queue = q;
359 vb->num_planes = num_planes;
2d86401c 360 vb->v4l2_buf.index = q->num_buffers + buffer;
e23ccc0a
PO
361 vb->v4l2_buf.type = q->type;
362 vb->v4l2_buf.memory = memory;
363
364 /* Allocate video buffer memory for the MMAP type */
365 if (memory == V4L2_MEMORY_MMAP) {
c1426bc7 366 ret = __vb2_buf_mem_alloc(vb);
e23ccc0a
PO
367 if (ret) {
368 dprintk(1, "Failed allocating memory for "
369 "buffer %d\n", buffer);
370 kfree(vb);
371 break;
372 }
373 /*
374 * Call the driver-provided buffer initialization
375 * callback, if given. An error in initialization
376 * results in queue setup failure.
377 */
b5b4541e 378 ret = call_vb_qop(vb, buf_init, vb);
e23ccc0a
PO
379 if (ret) {
380 dprintk(1, "Buffer %d %p initialization"
381 " failed\n", buffer, vb);
382 __vb2_buf_mem_free(vb);
383 kfree(vb);
384 break;
385 }
386 }
387
2d86401c 388 q->bufs[q->num_buffers + buffer] = vb;
e23ccc0a
PO
389 }
390
a5e3d743 391 __setup_lengths(q, buffer);
dc77523c
PZ
392 if (memory == V4L2_MEMORY_MMAP)
393 __setup_offsets(q, buffer);
e23ccc0a
PO
394
395 dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
2d86401c 396 buffer, num_planes);
e23ccc0a
PO
397
398 return buffer;
399}
400
401/**
402 * __vb2_free_mem() - release all video buffer memory for a given queue
403 */
2d86401c 404static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
e23ccc0a
PO
405{
406 unsigned int buffer;
407 struct vb2_buffer *vb;
408
2d86401c
GL
409 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
410 ++buffer) {
e23ccc0a
PO
411 vb = q->bufs[buffer];
412 if (!vb)
413 continue;
414
415 /* Free MMAP buffers or release USERPTR buffers */
416 if (q->memory == V4L2_MEMORY_MMAP)
417 __vb2_buf_mem_free(vb);
c5384048
SS
418 else if (q->memory == V4L2_MEMORY_DMABUF)
419 __vb2_buf_dmabuf_put(vb);
e23ccc0a
PO
420 else
421 __vb2_buf_userptr_put(vb);
422 }
423}
424
425/**
2d86401c
GL
426 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
427 * related information, if no buffers are left return the queue to an
428 * uninitialized state. Might be called even if the queue has already been freed.
e23ccc0a 429 */
63faabfd 430static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
e23ccc0a
PO
431{
432 unsigned int buffer;
433
63faabfd
HV
434 /*
435 * Sanity check: when preparing a buffer the queue lock is released for
436 * a short while (see __buf_prepare for the details), which would allow
437 * a race with a reqbufs which can call this function. Removing the
438 * buffers from underneath __buf_prepare is obviously a bad idea, so we
439 * check if any of the buffers is in the state PREPARING, and if so we
440 * just return -EAGAIN.
441 */
442 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
443 ++buffer) {
444 if (q->bufs[buffer] == NULL)
445 continue;
446 if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
447 dprintk(1, "reqbufs: preparing buffers, cannot free\n");
448 return -EAGAIN;
449 }
450 }
451
e23ccc0a 452 /* Call driver-provided cleanup function for each buffer, if provided */
b5b4541e
HV
453 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
454 ++buffer) {
256f3162
HV
455 struct vb2_buffer *vb = q->bufs[buffer];
456
457 if (vb && vb->planes[0].mem_priv)
a1d36d8c 458 call_void_vb_qop(vb, buf_cleanup, vb);
e23ccc0a
PO
459 }
460
461 /* Release video buffer memory */
2d86401c 462 __vb2_free_mem(q, buffers);
e23ccc0a 463
b5b4541e
HV
464#ifdef CONFIG_VIDEO_ADV_DEBUG
465 /*
466 * Check that all the calls were balances during the life-time of this
467 * queue. If not (or if the debug level is 1 or up), then dump the
468 * counters to the kernel log.
469 */
470 if (q->num_buffers) {
471 bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
472 q->cnt_wait_prepare != q->cnt_wait_finish;
473
474 if (unbalanced || debug) {
475 pr_info("vb2: counters for queue %p:%s\n", q,
476 unbalanced ? " UNBALANCED!" : "");
477 pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
478 q->cnt_queue_setup, q->cnt_start_streaming,
479 q->cnt_stop_streaming);
480 pr_info("vb2: wait_prepare: %u wait_finish: %u\n",
481 q->cnt_wait_prepare, q->cnt_wait_finish);
482 }
483 q->cnt_queue_setup = 0;
484 q->cnt_wait_prepare = 0;
485 q->cnt_wait_finish = 0;
486 q->cnt_start_streaming = 0;
487 q->cnt_stop_streaming = 0;
488 }
489 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
490 struct vb2_buffer *vb = q->bufs[buffer];
491 bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
492 vb->cnt_mem_prepare != vb->cnt_mem_finish ||
493 vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
494 vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
495 vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
496 vb->cnt_buf_queue != vb->cnt_buf_done ||
497 vb->cnt_buf_prepare != vb->cnt_buf_finish ||
498 vb->cnt_buf_init != vb->cnt_buf_cleanup;
499
500 if (unbalanced || debug) {
501 pr_info("vb2: counters for queue %p, buffer %d:%s\n",
502 q, buffer, unbalanced ? " UNBALANCED!" : "");
503 pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
504 vb->cnt_buf_init, vb->cnt_buf_cleanup,
505 vb->cnt_buf_prepare, vb->cnt_buf_finish);
506 pr_info("vb2: buf_queue: %u buf_done: %u\n",
507 vb->cnt_buf_queue, vb->cnt_buf_done);
508 pr_info("vb2: alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
509 vb->cnt_mem_alloc, vb->cnt_mem_put,
510 vb->cnt_mem_prepare, vb->cnt_mem_finish,
511 vb->cnt_mem_mmap);
512 pr_info("vb2: get_userptr: %u put_userptr: %u\n",
513 vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
514 pr_info("vb2: attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
515 vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
516 vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
517 pr_info("vb2: get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
518 vb->cnt_mem_get_dmabuf,
519 vb->cnt_mem_num_users,
520 vb->cnt_mem_vaddr,
521 vb->cnt_mem_cookie);
522 }
523 }
524#endif
525
e23ccc0a 526 /* Free videobuf buffers */
2d86401c
GL
527 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
528 ++buffer) {
e23ccc0a
PO
529 kfree(q->bufs[buffer]);
530 q->bufs[buffer] = NULL;
531 }
532
2d86401c 533 q->num_buffers -= buffers;
a7afcacc 534 if (!q->num_buffers) {
2d86401c 535 q->memory = 0;
a7afcacc
HV
536 INIT_LIST_HEAD(&q->queued_list);
537 }
63faabfd 538 return 0;
e23ccc0a
PO
539}
540
541/**
542 * __verify_planes_array() - verify that the planes array passed in struct
543 * v4l2_buffer from userspace can be safely used
544 */
2d86401c 545static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
e23ccc0a 546{
32a77260
HV
547 if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
548 return 0;
549
e23ccc0a
PO
550 /* Is memory for copying plane information present? */
551 if (NULL == b->m.planes) {
552 dprintk(1, "Multi-planar buffer passed but "
553 "planes array not provided\n");
554 return -EINVAL;
555 }
556
557 if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
558 dprintk(1, "Incorrect planes array length, "
559 "expected %d, got %d\n", vb->num_planes, b->length);
560 return -EINVAL;
561 }
562
563 return 0;
564}
565
8023ed09
LP
566/**
567 * __verify_length() - Verify that the bytesused value for each plane fits in
568 * the plane length and that the data offset doesn't exceed the bytesused value.
569 */
570static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
571{
572 unsigned int length;
573 unsigned int plane;
574
575 if (!V4L2_TYPE_IS_OUTPUT(b->type))
576 return 0;
577
578 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
579 for (plane = 0; plane < vb->num_planes; ++plane) {
580 length = (b->memory == V4L2_MEMORY_USERPTR)
581 ? b->m.planes[plane].length
582 : vb->v4l2_planes[plane].length;
583
584 if (b->m.planes[plane].bytesused > length)
585 return -EINVAL;
3c5c23c5
SN
586
587 if (b->m.planes[plane].data_offset > 0 &&
588 b->m.planes[plane].data_offset >=
8023ed09
LP
589 b->m.planes[plane].bytesused)
590 return -EINVAL;
591 }
592 } else {
593 length = (b->memory == V4L2_MEMORY_USERPTR)
594 ? b->length : vb->v4l2_planes[0].length;
595
596 if (b->bytesused > length)
597 return -EINVAL;
598 }
599
600 return 0;
601}
602
25a27d91
MS
603/**
604 * __buffer_in_use() - return true if the buffer is in use and
605 * the queue cannot be freed (by the means of REQBUFS(0)) call
606 */
607static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
608{
609 unsigned int plane;
610 for (plane = 0; plane < vb->num_planes; ++plane) {
2c2dd6ac 611 void *mem_priv = vb->planes[plane].mem_priv;
25a27d91
MS
612 /*
613 * If num_users() has not been provided, call_memop
614 * will return 0, apparently nobody cares about this
615 * case anyway. If num_users() returns more than 1,
616 * we are not the only user of the plane's memory.
617 */
b5b4541e 618 if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
25a27d91
MS
619 return true;
620 }
621 return false;
622}
623
624/**
625 * __buffers_in_use() - return true if any buffers on the queue are in use and
626 * the queue cannot be freed (by the means of REQBUFS(0)) call
627 */
628static bool __buffers_in_use(struct vb2_queue *q)
629{
630 unsigned int buffer;
631 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
632 if (__buffer_in_use(q, q->bufs[buffer]))
633 return true;
634 }
635 return false;
636}
637
e23ccc0a
PO
638/**
639 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
640 * returned to userspace
641 */
32a77260 642static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
e23ccc0a
PO
643{
644 struct vb2_queue *q = vb->vb2_queue;
e23ccc0a 645
2b719d7b 646 /* Copy back data such as timestamp, flags, etc. */
e23ccc0a 647 memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
2b719d7b 648 b->reserved2 = vb->v4l2_buf.reserved2;
e23ccc0a
PO
649 b->reserved = vb->v4l2_buf.reserved;
650
651 if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
e23ccc0a
PO
652 /*
653 * Fill in plane-related data if userspace provided an array
32a77260 654 * for it. The caller has already verified memory and size.
e23ccc0a 655 */
3c0b6061 656 b->length = vb->num_planes;
e23ccc0a
PO
657 memcpy(b->m.planes, vb->v4l2_planes,
658 b->length * sizeof(struct v4l2_plane));
659 } else {
660 /*
661 * We use length and offset in v4l2_planes array even for
662 * single-planar buffers, but userspace does not.
663 */
664 b->length = vb->v4l2_planes[0].length;
665 b->bytesused = vb->v4l2_planes[0].bytesused;
666 if (q->memory == V4L2_MEMORY_MMAP)
667 b->m.offset = vb->v4l2_planes[0].m.mem_offset;
668 else if (q->memory == V4L2_MEMORY_USERPTR)
669 b->m.userptr = vb->v4l2_planes[0].m.userptr;
c5384048
SS
670 else if (q->memory == V4L2_MEMORY_DMABUF)
671 b->m.fd = vb->v4l2_planes[0].m.fd;
e23ccc0a
PO
672 }
673
ea42c8ec
MS
674 /*
675 * Clear any buffer state related flags.
676 */
1b18e7a0 677 b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
7ce6fd8f
SA
678 b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
679 if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
680 V4L2_BUF_FLAG_TIMESTAMP_COPY) {
681 /*
682 * For non-COPY timestamps, drop timestamp source bits
683 * and obtain the timestamp source from the queue.
684 */
685 b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
686 b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
687 }
e23ccc0a
PO
688
689 switch (vb->state) {
690 case VB2_BUF_STATE_QUEUED:
691 case VB2_BUF_STATE_ACTIVE:
692 b->flags |= V4L2_BUF_FLAG_QUEUED;
693 break;
694 case VB2_BUF_STATE_ERROR:
695 b->flags |= V4L2_BUF_FLAG_ERROR;
696 /* fall through */
697 case VB2_BUF_STATE_DONE:
698 b->flags |= V4L2_BUF_FLAG_DONE;
699 break;
ebc087d0 700 case VB2_BUF_STATE_PREPARED:
2d86401c
GL
701 b->flags |= V4L2_BUF_FLAG_PREPARED;
702 break;
b18a8ff2 703 case VB2_BUF_STATE_PREPARING:
2d86401c 704 case VB2_BUF_STATE_DEQUEUED:
e23ccc0a
PO
705 /* nothing */
706 break;
707 }
708
25a27d91 709 if (__buffer_in_use(q, vb))
e23ccc0a 710 b->flags |= V4L2_BUF_FLAG_MAPPED;
e23ccc0a
PO
711}
712
713/**
714 * vb2_querybuf() - query video buffer information
715 * @q: videobuf queue
716 * @b: buffer struct passed from userspace to vidioc_querybuf handler
717 * in driver
718 *
719 * Should be called from vidioc_querybuf ioctl handler in driver.
720 * This function will verify the passed v4l2_buffer structure and fill the
721 * relevant information for the userspace.
722 *
723 * The return values from this function are intended to be directly returned
724 * from vidioc_querybuf handler in driver.
725 */
726int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
727{
728 struct vb2_buffer *vb;
32a77260 729 int ret;
e23ccc0a
PO
730
731 if (b->type != q->type) {
732 dprintk(1, "querybuf: wrong buffer type\n");
733 return -EINVAL;
734 }
735
736 if (b->index >= q->num_buffers) {
737 dprintk(1, "querybuf: buffer index out of range\n");
738 return -EINVAL;
739 }
740 vb = q->bufs[b->index];
32a77260
HV
741 ret = __verify_planes_array(vb, b);
742 if (!ret)
743 __fill_v4l2_buffer(vb, b);
744 return ret;
e23ccc0a
PO
745}
746EXPORT_SYMBOL(vb2_querybuf);
747
748/**
749 * __verify_userptr_ops() - verify that all memory operations required for
750 * USERPTR queue type have been provided
751 */
752static int __verify_userptr_ops(struct vb2_queue *q)
753{
754 if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
755 !q->mem_ops->put_userptr)
756 return -EINVAL;
757
758 return 0;
759}
760
761/**
762 * __verify_mmap_ops() - verify that all memory operations required for
763 * MMAP queue type have been provided
764 */
765static int __verify_mmap_ops(struct vb2_queue *q)
766{
767 if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
768 !q->mem_ops->put || !q->mem_ops->mmap)
769 return -EINVAL;
770
771 return 0;
772}
773
c5384048
SS
774/**
775 * __verify_dmabuf_ops() - verify that all memory operations required for
776 * DMABUF queue type have been provided
777 */
778static int __verify_dmabuf_ops(struct vb2_queue *q)
779{
780 if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
781 !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
782 !q->mem_ops->unmap_dmabuf)
783 return -EINVAL;
784
785 return 0;
786}
787
e23ccc0a 788/**
37d9ed94
HV
789 * __verify_memory_type() - Check whether the memory type and buffer type
790 * passed to a buffer operation are compatible with the queue.
791 */
792static int __verify_memory_type(struct vb2_queue *q,
793 enum v4l2_memory memory, enum v4l2_buf_type type)
794{
c5384048
SS
795 if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
796 memory != V4L2_MEMORY_DMABUF) {
37d9ed94
HV
797 dprintk(1, "reqbufs: unsupported memory type\n");
798 return -EINVAL;
799 }
800
801 if (type != q->type) {
802 dprintk(1, "reqbufs: requested type is incorrect\n");
803 return -EINVAL;
804 }
805
806 /*
807 * Make sure all the required memory ops for given memory type
808 * are available.
809 */
810 if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
811 dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
812 return -EINVAL;
813 }
814
815 if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
816 dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
817 return -EINVAL;
818 }
819
c5384048
SS
820 if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
821 dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
822 return -EINVAL;
823 }
824
37d9ed94
HV
825 /*
826 * Place the busy tests at the end: -EBUSY can be ignored when
827 * create_bufs is called with count == 0, but count == 0 should still
828 * do the memory and type validation.
829 */
830 if (q->fileio) {
831 dprintk(1, "reqbufs: file io in progress\n");
832 return -EBUSY;
833 }
834 return 0;
835}
836
837/**
838 * __reqbufs() - Initiate streaming
e23ccc0a
PO
839 * @q: videobuf2 queue
840 * @req: struct passed from userspace to vidioc_reqbufs handler in driver
841 *
842 * Should be called from vidioc_reqbufs ioctl handler of a driver.
843 * This function:
844 * 1) verifies streaming parameters passed from the userspace,
845 * 2) sets up the queue,
846 * 3) negotiates number of buffers and planes per buffer with the driver
847 * to be used during streaming,
848 * 4) allocates internal buffer structures (struct vb2_buffer), according to
849 * the agreed parameters,
850 * 5) for MMAP memory type, allocates actual video memory, using the
851 * memory handling/allocation routines provided during queue initialization
852 *
853 * If req->count is 0, all the memory will be freed instead.
854 * If the queue has been allocated previously (by a previous vb2_reqbufs) call
855 * and the queue is not busy, memory will be reallocated.
856 *
857 * The return values from this function are intended to be directly returned
858 * from vidioc_reqbufs handler in driver.
859 */
37d9ed94 860static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
e23ccc0a 861{
2d86401c 862 unsigned int num_buffers, allocated_buffers, num_planes = 0;
37d9ed94 863 int ret;
e23ccc0a
PO
864
865 if (q->streaming) {
866 dprintk(1, "reqbufs: streaming active\n");
867 return -EBUSY;
868 }
869
29e3fbd8 870 if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
e23ccc0a
PO
871 /*
872 * We already have buffers allocated, so first check if they
873 * are not in use and can be freed.
874 */
875 if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
876 dprintk(1, "reqbufs: memory in use, cannot free\n");
877 return -EBUSY;
878 }
879
fb64dca8
HV
880 /*
881 * Call queue_cancel to clean up any buffers in the PREPARED or
882 * QUEUED state which is possible if buffers were prepared or
883 * queued without ever calling STREAMON.
884 */
885 __vb2_queue_cancel(q);
63faabfd
HV
886 ret = __vb2_queue_free(q, q->num_buffers);
887 if (ret)
888 return ret;
29e3fbd8
MS
889
890 /*
891 * In case of REQBUFS(0) return immediately without calling
892 * driver's queue_setup() callback and allocating resources.
893 */
894 if (req->count == 0)
895 return 0;
e23ccc0a
PO
896 }
897
898 /*
899 * Make sure the requested values and current defaults are sane.
900 */
901 num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
b3379c62 902 num_buffers = max_t(unsigned int, req->count, q->min_buffers_needed);
c1426bc7 903 memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
e23ccc0a 904 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
13b14095 905 q->memory = req->memory;
e23ccc0a
PO
906
907 /*
908 * Ask the driver how many buffers and planes per buffer it requires.
909 * Driver also sets the size and allocator context for each plane.
910 */
fc714e70 911 ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
c1426bc7 912 q->plane_sizes, q->alloc_ctx);
a1d36d8c 913 if (ret)
e23ccc0a
PO
914 return ret;
915
916 /* Finally, allocate buffers and video memory */
a7afcacc
HV
917 allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
918 if (allocated_buffers == 0) {
66072d4f
MS
919 dprintk(1, "Memory allocation failed\n");
920 return -ENOMEM;
e23ccc0a
PO
921 }
922
b3379c62
HV
923 /*
924 * There is no point in continuing if we can't allocate the minimum
925 * number of buffers needed by this vb2_queue.
926 */
927 if (allocated_buffers < q->min_buffers_needed)
928 ret = -ENOMEM;
929
e23ccc0a
PO
930 /*
931 * Check if driver can handle the allocated number of buffers.
932 */
b3379c62 933 if (!ret && allocated_buffers < num_buffers) {
2d86401c 934 num_buffers = allocated_buffers;
e23ccc0a 935
fc714e70
GL
936 ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
937 &num_planes, q->plane_sizes, q->alloc_ctx);
e23ccc0a 938
2d86401c 939 if (!ret && allocated_buffers < num_buffers)
e23ccc0a 940 ret = -ENOMEM;
e23ccc0a
PO
941
942 /*
2d86401c
GL
943 * Either the driver has accepted a smaller number of buffers,
944 * or .queue_setup() returned an error
e23ccc0a 945 */
2d86401c
GL
946 }
947
948 q->num_buffers = allocated_buffers;
949
950 if (ret < 0) {
a7afcacc
HV
951 /*
952 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
953 * from q->num_buffers.
954 */
2d86401c
GL
955 __vb2_queue_free(q, allocated_buffers);
956 return ret;
e23ccc0a
PO
957 }
958
e23ccc0a
PO
959 /*
960 * Return the number of successfully allocated buffers
961 * to the userspace.
962 */
2d86401c 963 req->count = allocated_buffers;
e23ccc0a
PO
964
965 return 0;
e23ccc0a 966}
37d9ed94
HV
967
968/**
969 * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
970 * type values.
971 * @q: videobuf2 queue
972 * @req: struct passed from userspace to vidioc_reqbufs handler in driver
973 */
974int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
975{
976 int ret = __verify_memory_type(q, req->memory, req->type);
977
978 return ret ? ret : __reqbufs(q, req);
979}
e23ccc0a
PO
980EXPORT_SYMBOL_GPL(vb2_reqbufs);
981
/**
 * __create_bufs() - Allocate buffers and any required auxiliary structs
 * @q:		videobuf2 queue
 * @create:	creation parameters, passed from userspace to vidioc_create_bufs
 *		handler in driver
 *
 * Should be called from vidioc_create_bufs ioctl handler of a driver.
 * This function:
 * 1) verifies parameter sanity
 * 2) calls the .queue_setup() queue operation
 * 3) performs any necessary memory allocations
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_create_bufs handler in driver.
 */
static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	int ret;

	/* The queue's buffer array is fixed-size; refuse once it is full. */
	if (q->num_buffers == VIDEO_MAX_FRAME) {
		dprintk(1, "%s(): maximum number of buffers already allocated\n",
			__func__);
		return -ENOBUFS;
	}

	if (!q->num_buffers) {
		/* First allocation on this queue: start from a clean slate
		 * and latch the memory model for all subsequent buffers. */
		memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
		q->memory = create->memory;
	}

	/* Never create more buffers than the array has room left for. */
	num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);

	/*
	 * Ask the driver, whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable
	 */
	ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
		       &num_planes, q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,
				num_planes);
	if (allocated_buffers == 0) {
		dprintk(1, "Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * q->num_buffers contains the total number of buffers, that the
		 * queue driver has set up
		 */
		ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		/* queue_setup may raise num_buffers again; that cannot be
		 * satisfied with what was actually allocated. */
		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	/* Account for the new buffers before any rollback, see below. */
	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		return -ENOMEM;
	}

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	create->count = allocated_buffers;

	return 0;
}
37d9ed94
HV
1074
1075/**
53aa3b19
NT
1076 * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
1077 * memory and type values.
37d9ed94
HV
1078 * @q: videobuf2 queue
1079 * @create: creation parameters, passed from userspace to vidioc_create_bufs
1080 * handler in driver
1081 */
1082int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
1083{
1084 int ret = __verify_memory_type(q, create->memory, create->format.type);
1085
1086 create->index = q->num_buffers;
f05393d2
HV
1087 if (create->count == 0)
1088 return ret != -EBUSY ? ret : 0;
37d9ed94
HV
1089 return ret ? ret : __create_bufs(q, create);
1090}
2d86401c
GL
1091EXPORT_SYMBOL_GPL(vb2_create_bufs);
1092
e23ccc0a
PO
1093/**
1094 * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
1095 * @vb: vb2_buffer to which the plane in question belongs to
1096 * @plane_no: plane number for which the address is to be returned
1097 *
1098 * This function returns a kernel virtual address of a given plane if
1099 * such a mapping exist, NULL otherwise.
1100 */
1101void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
1102{
a00d0266 1103 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
e23ccc0a
PO
1104 return NULL;
1105
a1d36d8c 1106 return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
e23ccc0a
PO
1107
1108}
1109EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
1110
1111/**
1112 * vb2_plane_cookie() - Return allocator specific cookie for the given plane
1113 * @vb: vb2_buffer to which the plane in question belongs to
1114 * @plane_no: plane number for which the cookie is to be returned
1115 *
1116 * This function returns an allocator specific cookie for a given plane if
1117 * available, NULL otherwise. The allocator should provide some simple static
1118 * inline function, which would convert this cookie to the allocator specific
1119 * type that can be used directly by the driver to access the buffer. This can
1120 * be for example physical address, pointer to scatter list or IOMMU mapping.
1121 */
1122void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
1123{
a00d0266 1124 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
e23ccc0a
PO
1125 return NULL;
1126
a1d36d8c 1127 return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
e23ccc0a
PO
1128}
1129EXPORT_SYMBOL_GPL(vb2_plane_cookie);
1130
1131/**
1132 * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
1133 * @vb: vb2_buffer returned from the driver
1134 * @state: either VB2_BUF_STATE_DONE if the operation finished successfully
b3379c62
HV
1135 * or VB2_BUF_STATE_ERROR if the operation finished with an error.
1136 * If start_streaming fails then it should return buffers with state
1137 * VB2_BUF_STATE_QUEUED to put them back into the queue.
e23ccc0a
PO
1138 *
1139 * This function should be called by the driver after a hardware operation on
1140 * a buffer is finished and the buffer may be returned to userspace. The driver
1141 * cannot use this buffer anymore until it is queued back to it by videobuf
1142 * by the means of buf_queue callback. Only buffers previously queued to the
1143 * driver by buf_queue can be passed to this function.
b3379c62
HV
1144 *
1145 * While streaming a buffer can only be returned in state DONE or ERROR.
1146 * The start_streaming op can also return them in case the DMA engine cannot
1147 * be started for some reason. In that case the buffers should be returned with
1148 * state QUEUED.
e23ccc0a
PO
1149 */
1150void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
1151{
1152 struct vb2_queue *q = vb->vb2_queue;
1153 unsigned long flags;
3e0c2f20 1154 unsigned int plane;
e23ccc0a 1155
b3379c62 1156 if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
e23ccc0a
PO
1157 return;
1158
b3379c62
HV
1159 if (!q->start_streaming_called) {
1160 if (WARN_ON(state != VB2_BUF_STATE_QUEUED))
1161 state = VB2_BUF_STATE_QUEUED;
1162 } else if (!WARN_ON(!q->start_streaming_called)) {
1163 if (WARN_ON(state != VB2_BUF_STATE_DONE &&
1164 state != VB2_BUF_STATE_ERROR))
1165 state = VB2_BUF_STATE_ERROR;
1166 }
e23ccc0a 1167
b5b4541e
HV
1168#ifdef CONFIG_VIDEO_ADV_DEBUG
1169 /*
1170 * Although this is not a callback, it still does have to balance
1171 * with the buf_queue op. So update this counter manually.
1172 */
1173 vb->cnt_buf_done++;
1174#endif
e23ccc0a 1175 dprintk(4, "Done processing on buffer %d, state: %d\n",
9b6f5dc0 1176 vb->v4l2_buf.index, state);
e23ccc0a 1177
3e0c2f20
MS
1178 /* sync buffers */
1179 for (plane = 0; plane < vb->num_planes; ++plane)
a1d36d8c 1180 call_void_memop(vb, finish, vb->planes[plane].mem_priv);
3e0c2f20 1181
e23ccc0a
PO
1182 /* Add the buffer to the done buffers list */
1183 spin_lock_irqsave(&q->done_lock, flags);
1184 vb->state = state;
b3379c62
HV
1185 if (state != VB2_BUF_STATE_QUEUED)
1186 list_add_tail(&vb->done_entry, &q->done_list);
6ea3b980 1187 atomic_dec(&q->owned_by_drv_count);
e23ccc0a
PO
1188 spin_unlock_irqrestore(&q->done_lock, flags);
1189
b3379c62
HV
1190 if (state == VB2_BUF_STATE_QUEUED)
1191 return;
1192
e23ccc0a
PO
1193 /* Inform any processes that may be waiting for buffers */
1194 wake_up(&q->done_wq);
1195}
1196EXPORT_SYMBOL_GPL(vb2_buffer_done);
1197
/**
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. The caller has already verified that struct
 * v4l2_buffer has a valid number of planes.
 */
static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
				struct v4l2_plane *v4l2_planes)
{
	unsigned int plane;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			bool bytesused_is_used;

			/* Check if bytesused == 0 for all planes */
			for (plane = 0; plane < vb->num_planes; ++plane)
				if (b->m.planes[plane].bytesused)
					break;
			/* True iff the scan above broke out early, i.e. at
			 * least one plane had a non-zero bytesused. */
			bytesused_is_used = plane < vb->num_planes;

			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 *
			 * If bytesused_is_used is false, then fall back to the
			 * full buffer size. In that case userspace clearly
			 * never bothered to set it and it's a safe assumption
			 * that they really meant to use the full plane sizes.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct v4l2_plane *pdst = &v4l2_planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				pdst->bytesused = bytesused_is_used ?
					psrc->bytesused : psrc->length;
				pdst->data_offset = psrc->data_offset;
			}
		}

		if (b->memory == V4L2_MEMORY_USERPTR) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
		if (b->memory == V4L2_MEMORY_DMABUF) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal V4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 *
		 * If bytesused == 0, then fall back to the full buffer size
		 * as that's a sensible default.
		 */
		if (V4L2_TYPE_IS_OUTPUT(b->type))
			v4l2_planes[0].bytesused =
				b->bytesused ? b->bytesused : b->length;
		else
			v4l2_planes[0].bytesused = 0;

		if (b->memory == V4L2_MEMORY_USERPTR) {
			v4l2_planes[0].m.userptr = b->m.userptr;
			v4l2_planes[0].length = b->length;
		}

		if (b->memory == V4L2_MEMORY_DMABUF) {
			v4l2_planes[0].m.fd = b->m.fd;
			v4l2_planes[0].length = b->length;
		}
	}

	/* Zero flags that the vb2 core handles */
	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
	    V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_internal_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vb->v4l2_buf.field = b->field;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
	}
}
1307
/**
 * __qbuf_userptr() - handle qbuf of a USERPTR buffer
 *
 * Re-validates the userspace pointers supplied in @b against the planes this
 * buffer already holds: unchanged planes are kept, changed planes are
 * released and re-acquired through the allocator's get_userptr op. When any
 * plane was re-acquired, buf_init is (re)run; buf_prepare always runs last.
 * On failure every already-acquired plane is released again.
 */
static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	/* Capture buffers are written by the device, hence 'write'. */
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
	/* No memory yet on plane 0 means this buffer was never acquired. */
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->v4l2_planes[plane].m.userptr &&
		    vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->v4l2_planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "qbuf: userspace address for plane %d changed, "
				"reacquiring memory\n", plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < q->plane_sizes[plane]) {
			dprintk(1, "qbuf: provided buffer size %u is less than "
						"setup size %u for plane %d\n",
						planes[plane].length,
						q->plane_sizes[plane], plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				/* buf_cleanup must run exactly once before
				 * the first plane is dropped. */
				reacquired = true;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
				      planes[plane].m.userptr,
				      planes[plane].length, write);
		if (IS_ERR_OR_NULL(mem_priv)) {
			dprintk(1, "qbuf: failed acquiring userspace "
						"memory for plane %d\n", plane);
			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "qbuf: buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "qbuf: buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->v4l2_planes[plane].m.userptr = 0;
		vb->v4l2_planes[plane].length = 0;
	}

	return ret;
}
1410
1411/**
1412 * __qbuf_mmap() - handle qbuf of an MMAP buffer
1413 */
2d86401c 1414static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
e23ccc0a 1415{
32a77260 1416 __fill_vb2_buffer(vb, b, vb->v4l2_planes);
a1d36d8c 1417 return call_vb_qop(vb, buf_prepare, vb);
e23ccc0a
PO
1418}
1419
/**
 * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
 *
 * Resolves each plane's dmabuf fd, re-attaches any plane whose dmabuf or
 * length changed, then maps all planes. buf_init runs when anything was
 * re-acquired; buf_prepare always runs last. On failure all dmabuf
 * references held by this buffer are dropped.
 */
static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	/* Capture buffers are written by the device, hence 'write'. */
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Takes a reference on the dmabuf; dropped again below when
		 * the plane is unchanged, or on the error path. */
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < q->plane_sizes[plane]) {
			dprintk(1, "qbuf: invalid dmabuf length for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
		    vb->v4l2_planes[plane].length == planes[plane].length) {
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(1, "qbuf: buffer for plane %d changed\n", plane);

		if (!reacquired) {
			/* buf_cleanup must run once before the first plane
			 * is replaced. */
			reacquired = true;
			call_void_vb_qop(vb, buf_cleanup, vb);
		}

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
			dbuf, planes[plane].length, write);
		if (IS_ERR(mem_priv)) {
			dprintk(1, "qbuf: failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/* TODO: This pins the buffer(s) with dma_buf_map_attachment()).. but
	 * really we want to do this just before the DMA, not while queueing
	 * the buffer(s)..
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
		if (ret) {
			dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	if (reacquired) {
		/*
		 * Call driver-specific initialization on the newly acquired buffer,
		 * if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "qbuf: buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "qbuf: buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}
1537
e23ccc0a
PO
1538/**
1539 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
1540 */
1541static void __enqueue_in_driver(struct vb2_buffer *vb)
1542{
1543 struct vb2_queue *q = vb->vb2_queue;
3e0c2f20 1544 unsigned int plane;
e23ccc0a
PO
1545
1546 vb->state = VB2_BUF_STATE_ACTIVE;
6ea3b980 1547 atomic_inc(&q->owned_by_drv_count);
3e0c2f20
MS
1548
1549 /* sync buffers */
1550 for (plane = 0; plane < vb->num_planes; ++plane)
a1d36d8c 1551 call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
3e0c2f20 1552
a1d36d8c 1553 call_void_vb_qop(vb, buf_queue, vb);
e23ccc0a
PO
1554}
1555
/**
 * __buf_prepare() - common qbuf/prepare_buf path: verify the buffer and
 * dispatch to the memory-model specific handler.
 *
 * Moves the buffer through the PREPARING state; ends in PREPARED on
 * success or back in DEQUEUED on failure.
 */
static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct rw_semaphore *mmap_sem;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(1, "%s(): plane parameters verification failed: %d\n",
			__func__, ret);
		return ret;
	}

	vb->state = VB2_BUF_STATE_PREPARING;
	/* Clear driver-filled metadata left over from a previous cycle. */
	vb->v4l2_buf.timestamp.tv_sec = 0;
	vb->v4l2_buf.timestamp.tv_usec = 0;
	vb->v4l2_buf.sequence = 0;

	switch (q->memory) {
	case V4L2_MEMORY_MMAP:
		ret = __qbuf_mmap(vb, b);
		break;
	case V4L2_MEMORY_USERPTR:
		/*
		 * In case of user pointer buffers vb2 allocators need to get
		 * direct access to userspace pages. This requires getting
		 * the mmap semaphore for read access in the current process
		 * structure. The same semaphore is taken before calling mmap
		 * operation, while both qbuf/prepare_buf and mmap are called
		 * by the driver or v4l2 core with the driver's lock held.
		 * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
		 * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
		 * the videobuf2 core releases the driver's lock, takes
		 * mmap_sem and then takes the driver's lock again.
		 */
		mmap_sem = &current->mm->mmap_sem;
		call_void_qop(q, wait_prepare, q);
		down_read(mmap_sem);
		call_void_qop(q, wait_finish, q);

		ret = __qbuf_userptr(vb, b);

		up_read(mmap_sem);
		break;
	case V4L2_MEMORY_DMABUF:
		ret = __qbuf_dmabuf(vb, b);
		break;
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
	}

	if (ret)
		dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
	/* Roll back to DEQUEUED on failure, advance to PREPARED on success. */
	vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;

	return ret;
}
1614
012043b8 1615static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
4138111a 1616 const char *opname)
2d86401c 1617{
2d86401c 1618 if (b->type != q->type) {
012043b8 1619 dprintk(1, "%s(): invalid buffer type\n", opname);
b18a8ff2 1620 return -EINVAL;
2d86401c
GL
1621 }
1622
1623 if (b->index >= q->num_buffers) {
012043b8 1624 dprintk(1, "%s(): buffer index out of range\n", opname);
b18a8ff2 1625 return -EINVAL;
2d86401c
GL
1626 }
1627
4138111a 1628 if (q->bufs[b->index] == NULL) {
2d86401c 1629 /* Should never happen */
012043b8 1630 dprintk(1, "%s(): buffer is NULL\n", opname);
b18a8ff2 1631 return -EINVAL;
2d86401c
GL
1632 }
1633
1634 if (b->memory != q->memory) {
012043b8 1635 dprintk(1, "%s(): invalid memory type\n", opname);
b18a8ff2 1636 return -EINVAL;
2d86401c
GL
1637 }
1638
4138111a 1639 return __verify_planes_array(q->bufs[b->index], b);
012043b8 1640}
2d86401c 1641
e23ccc0a 1642/**
012043b8 1643 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
e23ccc0a 1644 * @q: videobuf2 queue
012043b8
LP
1645 * @b: buffer structure passed from userspace to vidioc_prepare_buf
1646 * handler in driver
e23ccc0a 1647 *
012043b8 1648 * Should be called from vidioc_prepare_buf ioctl handler of a driver.
e23ccc0a
PO
1649 * This function:
1650 * 1) verifies the passed buffer,
012043b8
LP
1651 * 2) calls buf_prepare callback in the driver (if provided), in which
1652 * driver-specific buffer initialization can be performed,
e23ccc0a
PO
1653 *
1654 * The return values from this function are intended to be directly returned
012043b8 1655 * from vidioc_prepare_buf handler in driver.
e23ccc0a 1656 */
012043b8 1657int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
e23ccc0a 1658{
4138111a 1659 struct vb2_buffer *vb;
b2f2f047
HV
1660 int ret;
1661
1662 if (q->fileio) {
1663 dprintk(1, "%s(): file io in progress\n", __func__);
1664 return -EBUSY;
1665 }
4138111a 1666
b2f2f047 1667 ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
4138111a
HV
1668 if (ret)
1669 return ret;
1670
1671 vb = q->bufs[b->index];
1672 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1673 dprintk(1, "%s(): invalid buffer state %d\n", __func__,
1674 vb->state);
1675 return -EINVAL;
1676 }
1677
1678 ret = __buf_prepare(vb, b);
1679 if (!ret) {
1680 /* Fill buffer information for the userspace */
1681 __fill_v4l2_buffer(vb, b);
1682
1683 dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
1684 }
1685 return ret;
012043b8
LP
1686}
1687EXPORT_SYMBOL_GPL(vb2_prepare_buf);
e23ccc0a 1688
02f142ec
HV
1689/**
1690 * vb2_start_streaming() - Attempt to start streaming.
1691 * @q: videobuf2 queue
1692 *
b3379c62
HV
1693 * Attempt to start streaming. When this function is called there must be
1694 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
1695 * number of buffers required for the DMA engine to function). If the
1696 * @start_streaming op fails it is supposed to return all the driver-owned
1697 * buffers back to vb2 in state QUEUED. Check if that happened and if
1698 * not warn and reclaim them forcefully.
02f142ec
HV
1699 */
1700static int vb2_start_streaming(struct vb2_queue *q)
1701{
b3379c62 1702 struct vb2_buffer *vb;
02f142ec
HV
1703 int ret;
1704
02f142ec 1705 /*
b3379c62
HV
1706 * If any buffers were queued before streamon,
1707 * we can now pass them to driver for processing.
02f142ec 1708 */
b3379c62
HV
1709 list_for_each_entry(vb, &q->queued_list, queued_entry)
1710 __enqueue_in_driver(vb);
1711
1712 /* Tell the driver to start streaming */
1713 ret = call_qop(q, start_streaming, q,
1714 atomic_read(&q->owned_by_drv_count));
1715 q->start_streaming_called = ret == 0;
1716 if (!ret)
02f142ec 1717 return 0;
b3379c62 1718
b3379c62
HV
1719 dprintk(1, "qbuf: driver refused to start streaming\n");
1720 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
1721 unsigned i;
1722
1723 /*
1724 * Forcefully reclaim buffers if the driver did not
1725 * correctly return them to vb2.
1726 */
1727 for (i = 0; i < q->num_buffers; ++i) {
1728 vb = q->bufs[i];
1729 if (vb->state == VB2_BUF_STATE_ACTIVE)
1730 vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
1731 }
1732 /* Must be zero now */
1733 WARN_ON(atomic_read(&q->owned_by_drv_count));
02f142ec 1734 }
02f142ec
HV
1735 return ret;
1736}
1737
/*
 * vb2_internal_qbuf() - core of VIDIOC_QBUF: validate, (if needed) prepare,
 * and queue one buffer, handing it to the driver when streaming is active.
 */
static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
	struct vb2_buffer *vb;

	if (ret)
		return ret;

	vb = q->bufs[b->index];

	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
		/* Not prepared via VIDIOC_PREPARE_BUF: do it now. */
		ret = __buf_prepare(vb, b);
		if (ret)
			return ret;
		break;
	case VB2_BUF_STATE_PREPARED:
		break;
	case VB2_BUF_STATE_PREPARING:
		dprintk(1, "qbuf: buffer still being prepared\n");
		return -EINVAL;
	default:
		dprintk(1, "%s(): invalid buffer state %d\n", __func__,
			vb->state);
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list, a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	list_add_tail(&vb->queued_entry, &q->queued_list);
	q->queued_count++;
	vb->state = VB2_BUF_STATE_QUEUED;
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		    V4L2_BUF_FLAG_TIMESTAMP_COPY)
			vb->v4l2_buf.timestamp = b->timestamp;
		vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vb->v4l2_buf.timecode = b->timecode;
	}

	/*
	 * If already streaming, give the buffer to driver for processing.
	 * If not, the buffer will be given to driver on next streamon.
	 */
	if (q->start_streaming_called)
		__enqueue_in_driver(vb);

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);

	/*
	 * If streamon has been called, and we haven't yet called
	 * start_streaming() since not enough buffers were queued, and
	 * we now have reached the minimum number of queued buffers,
	 * then we can finally call start_streaming().
	 */
	if (q->streaming && !q->start_streaming_called &&
	    q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret)
			return ret;
	}

	dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
	return 0;
}
b2f2f047
HV
1811
1812/**
1813 * vb2_qbuf() - Queue a buffer from userspace
1814 * @q: videobuf2 queue
1815 * @b: buffer structure passed from userspace to vidioc_qbuf handler
1816 * in driver
1817 *
1818 * Should be called from vidioc_qbuf ioctl handler of a driver.
1819 * This function:
1820 * 1) verifies the passed buffer,
1821 * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
1822 * which driver-specific buffer initialization can be performed,
1823 * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
1824 * callback for processing.
1825 *
1826 * The return values from this function are intended to be directly returned
1827 * from vidioc_qbuf handler in driver.
1828 */
1829int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1830{
1831 if (q->fileio) {
1832 dprintk(1, "%s(): file io in progress\n", __func__);
1833 return -EBUSY;
1834 }
1835
1836 return vb2_internal_qbuf(q, b);
1837}
e23ccc0a
PO
1838EXPORT_SYMBOL_GPL(vb2_qbuf);
1839
/**
 * __vb2_wait_for_done_vb() - wait for a buffer to become available
 * for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 *
 * Returns 0 once done_list is non-empty, -EINVAL if streaming stopped,
 * -EAGAIN for an empty list in nonblocking mode, or the interruption
 * error from the sleep.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will remain not
	 * empty if list_empty() check succeeds.
	 */

	for (;;) {
		int ret;

		if (!q->streaming) {
			dprintk(1, "Streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(1, "Nonblocking and no buffers to dequeue, "
								"will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
		call_void_qop(q, wait_prepare, q);

		/*
		 * All locks have been released, it is safe to sleep now.
		 */
		dprintk(3, "Will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming);

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
		call_void_qop(q, wait_finish, q);
		if (ret) {
			dprintk(1, "Sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}
1904
1905/**
1906 * __vb2_get_done_vb() - get a buffer ready for dequeuing
1907 *
1908 * Will sleep if required for nonblocking == false.
1909 */
1910static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
32a77260 1911 struct v4l2_buffer *b, int nonblocking)
e23ccc0a
PO
1912{
1913 unsigned long flags;
1914 int ret;
1915
1916 /*
1917 * Wait for at least one buffer to become available on the done_list.
1918 */
1919 ret = __vb2_wait_for_done_vb(q, nonblocking);
1920 if (ret)
1921 return ret;
1922
1923 /*
1924 * Driver's lock has been held since we last verified that done_list
1925 * is not empty, so no need for another list_empty(done_list) check.
1926 */
1927 spin_lock_irqsave(&q->done_lock, flags);
1928 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
32a77260
HV
1929 /*
1930 * Only remove the buffer from done_list if v4l2_buffer can handle all
1931 * the planes.
1932 */
1933 ret = __verify_planes_array(*vb, b);
1934 if (!ret)
1935 list_del(&(*vb)->done_entry);
e23ccc0a
PO
1936 spin_unlock_irqrestore(&q->done_lock, flags);
1937
32a77260 1938 return ret;
e23ccc0a
PO
1939}
1940
1941/**
1942 * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
1943 * @q: videobuf2 queue
1944 *
1945 * This function will wait until all buffers that have been given to the driver
1946 * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
1947 * wait_prepare, wait_finish pair. It is intended to be called with all locks
1948 * taken, for example from stop_streaming() callback.
1949 */
1950int vb2_wait_for_all_buffers(struct vb2_queue *q)
1951{
1952 if (!q->streaming) {
1953 dprintk(1, "Streaming off, will not wait for buffers\n");
1954 return -EINVAL;
1955 }
1956
b3379c62 1957 if (q->start_streaming_called)
6ea3b980 1958 wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
e23ccc0a
PO
1959 return 0;
1960}
1961EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
1962
c5384048
SS
1963/**
1964 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
1965 */
1966static void __vb2_dqbuf(struct vb2_buffer *vb)
1967{
1968 struct vb2_queue *q = vb->vb2_queue;
1969 unsigned int i;
1970
1971 /* nothing to do if the buffer is already dequeued */
1972 if (vb->state == VB2_BUF_STATE_DEQUEUED)
1973 return;
1974
1975 vb->state = VB2_BUF_STATE_DEQUEUED;
1976
1977 /* unmap DMABUF buffer */
1978 if (q->memory == V4L2_MEMORY_DMABUF)
1979 for (i = 0; i < vb->num_planes; ++i) {
1980 if (!vb->planes[i].dbuf_mapped)
1981 continue;
a1d36d8c 1982 call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
c5384048
SS
1983 vb->planes[i].dbuf_mapped = 0;
1984 }
1985}
1986
b2f2f047 1987static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
e23ccc0a
PO
1988{
1989 struct vb2_buffer *vb = NULL;
1990 int ret;
1991
1992 if (b->type != q->type) {
1993 dprintk(1, "dqbuf: invalid buffer type\n");
1994 return -EINVAL;
1995 }
32a77260
HV
1996 ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
1997 if (ret < 0)
e23ccc0a 1998 return ret;
e23ccc0a 1999
e23ccc0a
PO
2000 switch (vb->state) {
2001 case VB2_BUF_STATE_DONE:
2002 dprintk(3, "dqbuf: Returning done buffer\n");
2003 break;
2004 case VB2_BUF_STATE_ERROR:
2005 dprintk(3, "dqbuf: Returning done buffer with errors\n");
2006 break;
2007 default:
2008 dprintk(1, "dqbuf: Invalid buffer state\n");
2009 return -EINVAL;
2010 }
2011
a1d36d8c 2012 call_void_vb_qop(vb, buf_finish, vb);
9cf3c31a 2013
e23ccc0a
PO
2014 /* Fill buffer information for the userspace */
2015 __fill_v4l2_buffer(vb, b);
2016 /* Remove from videobuf queue */
2017 list_del(&vb->queued_entry);
b3379c62 2018 q->queued_count--;
c5384048
SS
2019 /* go back to dequeued state */
2020 __vb2_dqbuf(vb);
e23ccc0a
PO
2021
2022 dprintk(1, "dqbuf of buffer %d, with state %d\n",
2023 vb->v4l2_buf.index, vb->state);
2024
e23ccc0a
PO
2025 return 0;
2026}
b2f2f047
HV
2027
2028/**
2029 * vb2_dqbuf() - Dequeue a buffer to the userspace
2030 * @q: videobuf2 queue
2031 * @b: buffer structure passed from userspace to vidioc_dqbuf handler
2032 * in driver
2033 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
2034 * buffers ready for dequeuing are present. Normally the driver
2035 * would be passing (file->f_flags & O_NONBLOCK) here
2036 *
2037 * Should be called from vidioc_dqbuf ioctl handler of a driver.
2038 * This function:
2039 * 1) verifies the passed buffer,
2040 * 2) calls buf_finish callback in the driver (if provided), in which
2041 * driver can perform any additional operations that may be required before
2042 * returning the buffer to userspace, such as cache sync,
2043 * 3) the buffer struct members are filled with relevant information for
2044 * the userspace.
2045 *
2046 * The return values from this function are intended to be directly returned
2047 * from vidioc_dqbuf handler in driver.
2048 */
2049int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
2050{
2051 if (q->fileio) {
2052 dprintk(1, "dqbuf: file io in progress\n");
2053 return -EBUSY;
2054 }
2055 return vb2_internal_dqbuf(q, b, nonblocking);
2056}
e23ccc0a
PO
2057EXPORT_SYMBOL_GPL(vb2_dqbuf);
2058
bd323e28
MS
2059/**
2060 * __vb2_queue_cancel() - cancel and stop (pause) streaming
2061 *
2062 * Removes all queued buffers from driver's queue and all buffers queued by
2063 * userspace from videobuf's queue. Returns to state after reqbufs.
2064 */
2065static void __vb2_queue_cancel(struct vb2_queue *q)
2066{
2067 unsigned int i;
2068
2069 /*
2070 * Tell driver to stop all transactions and release all queued
2071 * buffers.
2072 */
b3379c62 2073 if (q->start_streaming_called)
bd323e28
MS
2074 call_qop(q, stop_streaming, q);
2075 q->streaming = 0;
b3379c62
HV
2076 q->start_streaming_called = 0;
2077 q->queued_count = 0;
2078
2079 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
2080 for (i = 0; i < q->num_buffers; ++i)
2081 if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
2082 vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
2083 /* Must be zero now */
2084 WARN_ON(atomic_read(&q->owned_by_drv_count));
2085 }
bd323e28
MS
2086
2087 /*
2088 * Remove all buffers from videobuf's list...
2089 */
2090 INIT_LIST_HEAD(&q->queued_list);
2091 /*
2092 * ...and done list; userspace will not receive any buffers it
2093 * has not already dequeued before initiating cancel.
2094 */
2095 INIT_LIST_HEAD(&q->done_list);
6ea3b980 2096 atomic_set(&q->owned_by_drv_count, 0);
bd323e28
MS
2097 wake_up_all(&q->done_wq);
2098
2099 /*
2100 * Reinitialize all buffers for next use.
9c0863b1
HV
2101 * Make sure to call buf_finish for any queued buffers. Normally
2102 * that's done in dqbuf, but that's not going to happen when we
2103 * cancel the whole queue. Note: this code belongs here, not in
2104 * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
2105 * call to __fill_v4l2_buffer() after buf_finish(). That order can't
2106 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
bd323e28 2107 */
9c0863b1
HV
2108 for (i = 0; i < q->num_buffers; ++i) {
2109 struct vb2_buffer *vb = q->bufs[i];
2110
2111 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
2112 vb->state = VB2_BUF_STATE_PREPARED;
a1d36d8c 2113 call_void_vb_qop(vb, buf_finish, vb);
9c0863b1
HV
2114 }
2115 __vb2_dqbuf(vb);
2116 }
bd323e28
MS
2117}
2118
b2f2f047 2119static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
e23ccc0a 2120{
5db2c3ba 2121 int ret;
e23ccc0a
PO
2122
2123 if (type != q->type) {
2124 dprintk(1, "streamon: invalid stream type\n");
2125 return -EINVAL;
2126 }
2127
2128 if (q->streaming) {
f956035c
RR
2129 dprintk(3, "streamon successful: already streaming\n");
2130 return 0;
e23ccc0a
PO
2131 }
2132
548df783
RR
2133 if (!q->num_buffers) {
2134 dprintk(1, "streamon: no buffers have been allocated\n");
2135 return -EINVAL;
2136 }
2137
249f5a58
RRD
2138 if (!q->num_buffers) {
2139 dprintk(1, "streamon: no buffers have been allocated\n");
2140 return -EINVAL;
2141 }
b3379c62
HV
2142 if (q->num_buffers < q->min_buffers_needed) {
2143 dprintk(1, "streamon: need at least %u allocated buffers\n",
2144 q->min_buffers_needed);
2145 return -EINVAL;
2146 }
249f5a58 2147
e23ccc0a 2148 /*
b3379c62
HV
2149 * Tell driver to start streaming provided sufficient buffers
2150 * are available.
e23ccc0a 2151 */
b3379c62
HV
2152 if (q->queued_count >= q->min_buffers_needed) {
2153 ret = vb2_start_streaming(q);
2154 if (ret) {
2155 __vb2_queue_cancel(q);
2156 return ret;
2157 }
5db2c3ba
PO
2158 }
2159
2160 q->streaming = 1;
e23ccc0a 2161
e23ccc0a
PO
2162 dprintk(3, "Streamon successful\n");
2163 return 0;
2164}
e23ccc0a
PO
2165
2166/**
b2f2f047 2167 * vb2_streamon - start streaming
e23ccc0a 2168 * @q: videobuf2 queue
b2f2f047 2169 * @type: type argument passed from userspace to vidioc_streamon handler
e23ccc0a 2170 *
b2f2f047 2171 * Should be called from vidioc_streamon handler of a driver.
e23ccc0a 2172 * This function:
b2f2f047
HV
2173 * 1) verifies current state
2174 * 2) passes any previously queued buffers to the driver and starts streaming
e23ccc0a 2175 *
e23ccc0a 2176 * The return values from this function are intended to be directly returned
b2f2f047 2177 * from vidioc_streamon handler in the driver.
e23ccc0a 2178 */
b2f2f047 2179int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
e23ccc0a 2180{
b25748fe 2181 if (q->fileio) {
b2f2f047 2182 dprintk(1, "streamon: file io in progress\n");
b25748fe
MS
2183 return -EBUSY;
2184 }
b2f2f047
HV
2185 return vb2_internal_streamon(q, type);
2186}
2187EXPORT_SYMBOL_GPL(vb2_streamon);
b25748fe 2188
b2f2f047
HV
2189static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2190{
e23ccc0a
PO
2191 if (type != q->type) {
2192 dprintk(1, "streamoff: invalid stream type\n");
2193 return -EINVAL;
2194 }
2195
e23ccc0a
PO
2196 /*
2197 * Cancel will pause streaming and remove all buffers from the driver
2198 * and videobuf, effectively returning control over them to userspace.
3f1a9a33
HV
2199 *
2200 * Note that we do this even if q->streaming == 0: if you prepare or
2201 * queue buffers, and then call streamoff without ever having called
2202 * streamon, you would still expect those buffers to be returned to
2203 * their normal dequeued state.
e23ccc0a
PO
2204 */
2205 __vb2_queue_cancel(q);
2206
2207 dprintk(3, "Streamoff successful\n");
2208 return 0;
2209}
b2f2f047
HV
2210
2211/**
2212 * vb2_streamoff - stop streaming
2213 * @q: videobuf2 queue
2214 * @type: type argument passed from userspace to vidioc_streamoff handler
2215 *
2216 * Should be called from vidioc_streamoff handler of a driver.
2217 * This function:
2218 * 1) verifies current state,
2219 * 2) stop streaming and dequeues any queued buffers, including those previously
2220 * passed to the driver (after waiting for the driver to finish).
2221 *
2222 * This call can be used for pausing playback.
2223 * The return values from this function are intended to be directly returned
2224 * from vidioc_streamoff handler in the driver
2225 */
2226int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2227{
2228 if (q->fileio) {
2229 dprintk(1, "streamoff: file io in progress\n");
2230 return -EBUSY;
2231 }
2232 return vb2_internal_streamoff(q, type);
2233}
e23ccc0a
PO
2234EXPORT_SYMBOL_GPL(vb2_streamoff);
2235
2236/**
2237 * __find_plane_by_offset() - find plane associated with the given offset off
2238 */
2239static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
2240 unsigned int *_buffer, unsigned int *_plane)
2241{
2242 struct vb2_buffer *vb;
2243 unsigned int buffer, plane;
2244
2245 /*
2246 * Go over all buffers and their planes, comparing the given offset
2247 * with an offset assigned to each plane. If a match is found,
2248 * return its buffer and plane numbers.
2249 */
2250 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
2251 vb = q->bufs[buffer];
2252
2253 for (plane = 0; plane < vb->num_planes; ++plane) {
2254 if (vb->v4l2_planes[plane].m.mem_offset == off) {
2255 *_buffer = buffer;
2256 *_plane = plane;
2257 return 0;
2258 }
2259 }
2260 }
2261
2262 return -EINVAL;
2263}
2264
83ae7c5a
TS
2265/**
2266 * vb2_expbuf() - Export a buffer as a file descriptor
2267 * @q: videobuf2 queue
2268 * @eb: export buffer structure passed from userspace to vidioc_expbuf
2269 * handler in driver
2270 *
2271 * The return values from this function are intended to be directly returned
2272 * from vidioc_expbuf handler in driver.
2273 */
2274int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
2275{
2276 struct vb2_buffer *vb = NULL;
2277 struct vb2_plane *vb_plane;
2278 int ret;
2279 struct dma_buf *dbuf;
2280
2281 if (q->memory != V4L2_MEMORY_MMAP) {
2282 dprintk(1, "Queue is not currently set up for mmap\n");
2283 return -EINVAL;
2284 }
2285
2286 if (!q->mem_ops->get_dmabuf) {
2287 dprintk(1, "Queue does not support DMA buffer exporting\n");
2288 return -EINVAL;
2289 }
2290
ea3aba84
PZ
2291 if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
2292 dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n");
83ae7c5a
TS
2293 return -EINVAL;
2294 }
2295
2296 if (eb->type != q->type) {
2297 dprintk(1, "qbuf: invalid buffer type\n");
2298 return -EINVAL;
2299 }
2300
2301 if (eb->index >= q->num_buffers) {
2302 dprintk(1, "buffer index out of range\n");
2303 return -EINVAL;
2304 }
2305
2306 vb = q->bufs[eb->index];
2307
2308 if (eb->plane >= vb->num_planes) {
2309 dprintk(1, "buffer plane out of range\n");
2310 return -EINVAL;
2311 }
2312
2313 vb_plane = &vb->planes[eb->plane];
2314
a1d36d8c 2315 dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
83ae7c5a
TS
2316 if (IS_ERR_OR_NULL(dbuf)) {
2317 dprintk(1, "Failed to export buffer %d, plane %d\n",
2318 eb->index, eb->plane);
2319 return -EINVAL;
2320 }
2321
ea3aba84 2322 ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
83ae7c5a
TS
2323 if (ret < 0) {
2324 dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
2325 eb->index, eb->plane, ret);
2326 dma_buf_put(dbuf);
2327 return ret;
2328 }
2329
2330 dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
2331 eb->index, eb->plane, ret);
2332 eb->fd = ret;
2333
2334 return 0;
2335}
2336EXPORT_SYMBOL_GPL(vb2_expbuf);
2337
e23ccc0a
PO
2338/**
2339 * vb2_mmap() - map video buffers into application address space
2340 * @q: videobuf2 queue
2341 * @vma: vma passed to the mmap file operation handler in the driver
2342 *
2343 * Should be called from mmap file operation handler of a driver.
2344 * This function maps one plane of one of the available video buffers to
2345 * userspace. To map whole video memory allocated on reqbufs, this function
2346 * has to be called once per each plane per each buffer previously allocated.
2347 *
2348 * When the userspace application calls mmap, it passes to it an offset returned
2349 * to it earlier by the means of vidioc_querybuf handler. That offset acts as
2350 * a "cookie", which is then used to identify the plane to be mapped.
2351 * This function finds a plane with a matching offset and a mapping is performed
2352 * by the means of a provided memory operation.
2353 *
2354 * The return values from this function are intended to be directly returned
2355 * from the mmap handler in driver.
2356 */
2357int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
2358{
2359 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
e23ccc0a
PO
2360 struct vb2_buffer *vb;
2361 unsigned int buffer, plane;
2362 int ret;
7f841459 2363 unsigned long length;
e23ccc0a
PO
2364
2365 if (q->memory != V4L2_MEMORY_MMAP) {
2366 dprintk(1, "Queue is not currently set up for mmap\n");
2367 return -EINVAL;
2368 }
2369
2370 /*
2371 * Check memory area access mode.
2372 */
2373 if (!(vma->vm_flags & VM_SHARED)) {
2374 dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
2375 return -EINVAL;
2376 }
2377 if (V4L2_TYPE_IS_OUTPUT(q->type)) {
2378 if (!(vma->vm_flags & VM_WRITE)) {
2379 dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
2380 return -EINVAL;
2381 }
2382 } else {
2383 if (!(vma->vm_flags & VM_READ)) {
2384 dprintk(1, "Invalid vma flags, VM_READ needed\n");
2385 return -EINVAL;
2386 }
2387 }
2388
2389 /*
2390 * Find the plane corresponding to the offset passed by userspace.
2391 */
2392 ret = __find_plane_by_offset(q, off, &buffer, &plane);
2393 if (ret)
2394 return ret;
2395
2396 vb = q->bufs[buffer];
e23ccc0a 2397
7f841459
MCC
2398 /*
2399 * MMAP requires page_aligned buffers.
2400 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
2401 * so, we need to do the same here.
2402 */
2403 length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
2404 if (length < (vma->vm_end - vma->vm_start)) {
2405 dprintk(1,
2406 "MMAP invalid, as it would overflow buffer length\n");
068a0df7
SWK
2407 return -EINVAL;
2408 }
2409
b5b4541e 2410 ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
a1d36d8c 2411 if (ret)
e23ccc0a
PO
2412 return ret;
2413
e23ccc0a
PO
2414 dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
2415 return 0;
2416}
2417EXPORT_SYMBOL_GPL(vb2_mmap);
2418
6f524ec1
SJ
2419#ifndef CONFIG_MMU
2420unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
2421 unsigned long addr,
2422 unsigned long len,
2423 unsigned long pgoff,
2424 unsigned long flags)
2425{
2426 unsigned long off = pgoff << PAGE_SHIFT;
2427 struct vb2_buffer *vb;
2428 unsigned int buffer, plane;
2429 int ret;
2430
2431 if (q->memory != V4L2_MEMORY_MMAP) {
2432 dprintk(1, "Queue is not currently set up for mmap\n");
2433 return -EINVAL;
2434 }
2435
2436 /*
2437 * Find the plane corresponding to the offset passed by userspace.
2438 */
2439 ret = __find_plane_by_offset(q, off, &buffer, &plane);
2440 if (ret)
2441 return ret;
2442
2443 vb = q->bufs[buffer];
2444
2445 return (unsigned long)vb2_plane_vaddr(vb, plane);
2446}
2447EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
2448#endif
2449
b25748fe
MS
2450static int __vb2_init_fileio(struct vb2_queue *q, int read);
2451static int __vb2_cleanup_fileio(struct vb2_queue *q);
e23ccc0a
PO
2452
2453/**
2454 * vb2_poll() - implements poll userspace operation
2455 * @q: videobuf2 queue
2456 * @file: file argument passed to the poll file operation handler
2457 * @wait: wait argument passed to the poll file operation handler
2458 *
2459 * This function implements poll file operation handler for a driver.
2460 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
2461 * be informed that the file descriptor of a video device is available for
2462 * reading.
2463 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
2464 * will be reported as available for writing.
2465 *
95213ceb
HV
2466 * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
2467 * pending events.
2468 *
e23ccc0a
PO
2469 * The return values from this function are intended to be directly returned
2470 * from poll handler in driver.
2471 */
2472unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
2473{
95213ceb 2474 struct video_device *vfd = video_devdata(file);
bf5c7cbb 2475 unsigned long req_events = poll_requested_events(wait);
e23ccc0a 2476 struct vb2_buffer *vb = NULL;
95213ceb
HV
2477 unsigned int res = 0;
2478 unsigned long flags;
2479
2480 if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
2481 struct v4l2_fh *fh = file->private_data;
2482
2483 if (v4l2_event_pending(fh))
2484 res = POLLPRI;
2485 else if (req_events & POLLPRI)
2486 poll_wait(file, &fh->wait, wait);
2487 }
e23ccc0a 2488
cd13823f
HV
2489 if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
2490 return res;
2491 if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
2492 return res;
2493
b25748fe 2494 /*
4ffabdb3 2495 * Start file I/O emulator only if streaming API has not been used yet.
b25748fe
MS
2496 */
2497 if (q->num_buffers == 0 && q->fileio == NULL) {
bf5c7cbb
HV
2498 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
2499 (req_events & (POLLIN | POLLRDNORM))) {
95213ceb
HV
2500 if (__vb2_init_fileio(q, 1))
2501 return res | POLLERR;
b25748fe 2502 }
bf5c7cbb
HV
2503 if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
2504 (req_events & (POLLOUT | POLLWRNORM))) {
95213ceb
HV
2505 if (__vb2_init_fileio(q, 0))
2506 return res | POLLERR;
b25748fe
MS
2507 /*
2508 * Write to OUTPUT queue can be done immediately.
2509 */
95213ceb 2510 return res | POLLOUT | POLLWRNORM;
b25748fe
MS
2511 }
2512 }
2513
e23ccc0a
PO
2514 /*
2515 * There is nothing to wait for if no buffers have already been queued.
2516 */
2517 if (list_empty(&q->queued_list))
95213ceb 2518 return res | POLLERR;
e23ccc0a 2519
412cb87d
SWK
2520 if (list_empty(&q->done_list))
2521 poll_wait(file, &q->done_wq, wait);
e23ccc0a
PO
2522
2523 /*
2524 * Take first buffer available for dequeuing.
2525 */
2526 spin_lock_irqsave(&q->done_lock, flags);
2527 if (!list_empty(&q->done_list))
2528 vb = list_first_entry(&q->done_list, struct vb2_buffer,
2529 done_entry);
2530 spin_unlock_irqrestore(&q->done_lock, flags);
2531
2532 if (vb && (vb->state == VB2_BUF_STATE_DONE
2533 || vb->state == VB2_BUF_STATE_ERROR)) {
95213ceb
HV
2534 return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
2535 res | POLLOUT | POLLWRNORM :
2536 res | POLLIN | POLLRDNORM;
e23ccc0a 2537 }
95213ceb 2538 return res;
e23ccc0a
PO
2539}
2540EXPORT_SYMBOL_GPL(vb2_poll);
2541
2542/**
2543 * vb2_queue_init() - initialize a videobuf2 queue
2544 * @q: videobuf2 queue; this structure should be allocated in driver
2545 *
2546 * The vb2_queue structure should be allocated by the driver. The driver is
2547 * responsible of clearing it's content and setting initial values for some
2548 * required entries before calling this function.
2549 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
2550 * to the struct vb2_queue description in include/media/videobuf2-core.h
2551 * for more information.
2552 */
2553int vb2_queue_init(struct vb2_queue *q)
2554{
896f38f5
EG
2555 /*
2556 * Sanity check
2557 */
2558 if (WARN_ON(!q) ||
2559 WARN_ON(!q->ops) ||
2560 WARN_ON(!q->mem_ops) ||
2561 WARN_ON(!q->type) ||
2562 WARN_ON(!q->io_modes) ||
2563 WARN_ON(!q->ops->queue_setup) ||
6aa69f99 2564 WARN_ON(!q->ops->buf_queue) ||
872484ce
SA
2565 WARN_ON(q->timestamp_flags &
2566 ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
2567 V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
896f38f5 2568 return -EINVAL;
e23ccc0a 2569
6aa69f99 2570 /* Warn that the driver should choose an appropriate timestamp type */
c57ff792
SA
2571 WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
2572 V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
6aa69f99 2573
e23ccc0a
PO
2574 INIT_LIST_HEAD(&q->queued_list);
2575 INIT_LIST_HEAD(&q->done_list);
2576 spin_lock_init(&q->done_lock);
2577 init_waitqueue_head(&q->done_wq);
2578
2579 if (q->buf_struct_size == 0)
2580 q->buf_struct_size = sizeof(struct vb2_buffer);
2581
2582 return 0;
2583}
2584EXPORT_SYMBOL_GPL(vb2_queue_init);
2585
2586/**
2587 * vb2_queue_release() - stop streaming, release the queue and free memory
2588 * @q: videobuf2 queue
2589 *
2590 * This function stops streaming and performs necessary clean ups, including
2591 * freeing video buffer memory. The driver is responsible for freeing
2592 * the vb2_queue structure itself.
2593 */
2594void vb2_queue_release(struct vb2_queue *q)
2595{
b25748fe 2596 __vb2_cleanup_fileio(q);
e23ccc0a 2597 __vb2_queue_cancel(q);
2d86401c 2598 __vb2_queue_free(q, q->num_buffers);
e23ccc0a
PO
2599}
2600EXPORT_SYMBOL_GPL(vb2_queue_release);
2601
b25748fe
MS
/**
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. This structure is used for
 * tracking context related to the buffers.
 */
struct vb2_fileio_buf {
	void *vaddr;		/* kernel mapping of plane 0 (vb2_plane_vaddr) */
	unsigned int size;	/* usable bytes: plane size or payload on read */
	unsigned int pos;	/* current read/write offset within the buffer */
	unsigned int queued:1;	/* set while the buffer is queued to the driver */
};
2615
2616/**
2617 * struct vb2_fileio_data - queue context used by file io emulator
2618 *
4e5a4d8a
HV
2619 * @cur_index: the index of the buffer currently being read from or
2620 * written to. If equal to q->num_buffers then a new buffer
2621 * must be dequeued.
2622 * @initial_index: in the read() case all buffers are queued up immediately
2623 * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
2624 * buffers. However, in the write() case no buffers are initially
2625 * queued, instead whenever a buffer is full it is queued up by
2626 * __vb2_perform_fileio(). Only once all available buffers have
2627 * been queued up will __vb2_perform_fileio() start to dequeue
2628 * buffers. This means that initially __vb2_perform_fileio()
2629 * needs to know what buffer index to use when it is queuing up
2630 * the buffers for the first time. That initial index is stored
2631 * in this field. Once it is equal to q->num_buffers all
2632 * available buffers have been queued and __vb2_perform_fileio()
2633 * should start the normal dequeue/queue cycle.
2634 *
b25748fe
MS
2635 * vb2 provides a compatibility layer and emulator of file io (read and
2636 * write) calls on top of streaming API. For proper operation it required
2637 * this structure to save the driver state between each call of the read
2638 * or write function.
2639 */
2640struct vb2_fileio_data {
2641 struct v4l2_requestbuffers req;
2642 struct v4l2_buffer b;
2643 struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
4e5a4d8a
HV
2644 unsigned int cur_index;
2645 unsigned int initial_index;
b25748fe
MS
2646 unsigned int q_count;
2647 unsigned int dq_count;
2648 unsigned int flags;
2649};
2650
2651/**
2652 * __vb2_init_fileio() - initialize file io emulator
2653 * @q: videobuf2 queue
2654 * @read: mode selector (1 means read, 0 means write)
2655 */
2656static int __vb2_init_fileio(struct vb2_queue *q, int read)
2657{
2658 struct vb2_fileio_data *fileio;
2659 int i, ret;
2660 unsigned int count = 0;
2661
2662 /*
2663 * Sanity check
2664 */
e4d25816
HV
2665 if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
2666 (!read && !(q->io_modes & VB2_WRITE))))
2667 return -EINVAL;
b25748fe
MS
2668
2669 /*
2670 * Check if device supports mapping buffers to kernel virtual space.
2671 */
2672 if (!q->mem_ops->vaddr)
2673 return -EBUSY;
2674
2675 /*
2676 * Check if streaming api has not been already activated.
2677 */
2678 if (q->streaming || q->num_buffers > 0)
2679 return -EBUSY;
2680
2681 /*
2682 * Start with count 1, driver can increase it in queue_setup()
2683 */
2684 count = 1;
2685
2686 dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
2687 (read) ? "read" : "write", count, q->io_flags);
2688
2689 fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
2690 if (fileio == NULL)
2691 return -ENOMEM;
2692
2693 fileio->flags = q->io_flags;
2694
2695 /*
2696 * Request buffers and use MMAP type to force driver
2697 * to allocate buffers by itself.
2698 */
2699 fileio->req.count = count;
2700 fileio->req.memory = V4L2_MEMORY_MMAP;
2701 fileio->req.type = q->type;
2702 ret = vb2_reqbufs(q, &fileio->req);
2703 if (ret)
2704 goto err_kfree;
2705
2706 /*
2707 * Check if plane_count is correct
2708 * (multiplane buffers are not supported).
2709 */
2710 if (q->bufs[0]->num_planes != 1) {
b25748fe
MS
2711 ret = -EBUSY;
2712 goto err_reqbufs;
2713 }
2714
2715 /*
2716 * Get kernel address of each buffer.
2717 */
2718 for (i = 0; i < q->num_buffers; i++) {
2719 fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
5dd6946c
WY
2720 if (fileio->bufs[i].vaddr == NULL) {
2721 ret = -EINVAL;
b25748fe 2722 goto err_reqbufs;
5dd6946c 2723 }
b25748fe
MS
2724 fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
2725 }
2726
2727 /*
2728 * Read mode requires pre queuing of all buffers.
2729 */
2730 if (read) {
2731 /*
2732 * Queue all buffers.
2733 */
2734 for (i = 0; i < q->num_buffers; i++) {
2735 struct v4l2_buffer *b = &fileio->b;
2736 memset(b, 0, sizeof(*b));
2737 b->type = q->type;
2738 b->memory = q->memory;
2739 b->index = i;
2740 ret = vb2_qbuf(q, b);
2741 if (ret)
2742 goto err_reqbufs;
2743 fileio->bufs[i].queued = 1;
2744 }
4e5a4d8a
HV
2745 /*
2746 * All buffers have been queued, so mark that by setting
2747 * initial_index to q->num_buffers
2748 */
2749 fileio->initial_index = q->num_buffers;
2750 fileio->cur_index = q->num_buffers;
b25748fe
MS
2751 }
2752
02f142ec
HV
2753 /*
2754 * Start streaming.
2755 */
2756 ret = vb2_streamon(q, q->type);
2757 if (ret)
2758 goto err_reqbufs;
2759
b25748fe
MS
2760 q->fileio = fileio;
2761
2762 return ret;
2763
2764err_reqbufs:
a67e1722 2765 fileio->req.count = 0;
b25748fe
MS
2766 vb2_reqbufs(q, &fileio->req);
2767
2768err_kfree:
2769 kfree(fileio);
2770 return ret;
2771}
2772
2773/**
2774 * __vb2_cleanup_fileio() - free resourced used by file io emulator
2775 * @q: videobuf2 queue
2776 */
2777static int __vb2_cleanup_fileio(struct vb2_queue *q)
2778{
2779 struct vb2_fileio_data *fileio = q->fileio;
2780
2781 if (fileio) {
b2f2f047 2782 vb2_internal_streamoff(q, q->type);
b25748fe 2783 q->fileio = NULL;
b25748fe
MS
2784 fileio->req.count = 0;
2785 vb2_reqbufs(q, &fileio->req);
2786 kfree(fileio);
2787 dprintk(3, "file io emulator closed\n");
2788 }
2789 return 0;
2790}
2791
2792/**
2793 * __vb2_perform_fileio() - perform a single file io (read or write) operation
2794 * @q: videobuf2 queue
2795 * @data: pointed to target userspace buffer
2796 * @count: number of bytes to read or write
2797 * @ppos: file handle position tracking pointer
2798 * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking)
2799 * @read: access mode selector (1 means read, 0 means write)
2800 */
2801static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
2802 loff_t *ppos, int nonblock, int read)
2803{
2804 struct vb2_fileio_data *fileio;
2805 struct vb2_fileio_buf *buf;
2806 int ret, index;
2807
08b99e26 2808 dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
b25748fe
MS
2809 read ? "read" : "write", (long)*ppos, count,
2810 nonblock ? "non" : "");
2811
2812 if (!data)
2813 return -EINVAL;
2814
2815 /*
2816 * Initialize emulator on first call.
2817 */
2818 if (!q->fileio) {
2819 ret = __vb2_init_fileio(q, read);
2820 dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
2821 if (ret)
2822 return ret;
2823 }
2824 fileio = q->fileio;
2825
b25748fe
MS
2826 /*
2827 * Check if we need to dequeue the buffer.
2828 */
4e5a4d8a 2829 index = fileio->cur_index;
88e26870 2830 if (index >= q->num_buffers) {
b25748fe
MS
2831 /*
2832 * Call vb2_dqbuf to get buffer back.
2833 */
2834 memset(&fileio->b, 0, sizeof(fileio->b));
2835 fileio->b.type = q->type;
2836 fileio->b.memory = q->memory;
b2f2f047 2837 ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
b25748fe
MS
2838 dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
2839 if (ret)
b2f2f047 2840 return ret;
b25748fe
MS
2841 fileio->dq_count += 1;
2842
4e5a4d8a 2843 fileio->cur_index = index = fileio->b.index;
88e26870
HV
2844 buf = &fileio->bufs[index];
2845
b25748fe
MS
2846 /*
2847 * Get number of bytes filled by the driver
2848 */
88e26870 2849 buf->pos = 0;
b25748fe 2850 buf->queued = 0;
88e26870
HV
2851 buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
2852 : vb2_plane_size(q->bufs[index], 0);
2853 } else {
2854 buf = &fileio->bufs[index];
b25748fe
MS
2855 }
2856
2857 /*
2858 * Limit count on last few bytes of the buffer.
2859 */
2860 if (buf->pos + count > buf->size) {
2861 count = buf->size - buf->pos;
08b99e26 2862 dprintk(5, "reducing read count: %zd\n", count);
b25748fe
MS
2863 }
2864
2865 /*
2866 * Transfer data to userspace.
2867 */
08b99e26 2868 dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
b25748fe
MS
2869 count, index, buf->pos);
2870 if (read)
2871 ret = copy_to_user(data, buf->vaddr + buf->pos, count);
2872 else
2873 ret = copy_from_user(buf->vaddr + buf->pos, data, count);
2874 if (ret) {
2875 dprintk(3, "file io: error copying data\n");
b2f2f047 2876 return -EFAULT;
b25748fe
MS
2877 }
2878
2879 /*
2880 * Update counters.
2881 */
2882 buf->pos += count;
2883 *ppos += count;
2884
2885 /*
2886 * Queue next buffer if required.
2887 */
2888 if (buf->pos == buf->size ||
2889 (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
2890 /*
2891 * Check if this is the last buffer to read.
2892 */
2893 if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
2894 fileio->dq_count == 1) {
2895 dprintk(3, "file io: read limit reached\n");
b25748fe
MS
2896 return __vb2_cleanup_fileio(q);
2897 }
2898
2899 /*
2900 * Call vb2_qbuf and give buffer to the driver.
2901 */
2902 memset(&fileio->b, 0, sizeof(fileio->b));
2903 fileio->b.type = q->type;
2904 fileio->b.memory = q->memory;
2905 fileio->b.index = index;
2906 fileio->b.bytesused = buf->pos;
b2f2f047 2907 ret = vb2_internal_qbuf(q, &fileio->b);
b25748fe
MS
2908 dprintk(5, "file io: vb2_dbuf result: %d\n", ret);
2909 if (ret)
b2f2f047 2910 return ret;
b25748fe
MS
2911
2912 /*
2913 * Buffer has been queued, update the status
2914 */
2915 buf->pos = 0;
2916 buf->queued = 1;
88e26870 2917 buf->size = vb2_plane_size(q->bufs[index], 0);
b25748fe 2918 fileio->q_count += 1;
4e5a4d8a
HV
2919 /*
2920 * If we are queuing up buffers for the first time, then
2921 * increase initial_index by one.
2922 */
2923 if (fileio->initial_index < q->num_buffers)
2924 fileio->initial_index++;
2925 /*
2926 * The next buffer to use is either a buffer that's going to be
2927 * queued for the first time (initial_index < q->num_buffers)
2928 * or it is equal to q->num_buffers, meaning that the next
2929 * time we need to dequeue a buffer since we've now queued up
2930 * all the 'first time' buffers.
2931 */
2932 fileio->cur_index = fileio->initial_index;
b25748fe
MS
2933 }
2934
2935 /*
2936 * Return proper number of bytes processed.
2937 */
2938 if (ret == 0)
2939 ret = count;
b25748fe
MS
2940 return ret;
2941}
2942
2943size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
2944 loff_t *ppos, int nonblocking)
2945{
2946 return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
2947}
2948EXPORT_SYMBOL_GPL(vb2_read);
2949
819585bc 2950size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
b25748fe
MS
2951 loff_t *ppos, int nonblocking)
2952{
819585bc
RR
2953 return __vb2_perform_fileio(q, (char __user *) data, count,
2954 ppos, nonblocking, 0);
b25748fe
MS
2955}
2956EXPORT_SYMBOL_GPL(vb2_write);
2957
4c1ffcaa
HV
2958
2959/*
2960 * The following functions are not part of the vb2 core API, but are helper
2961 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
2962 * and struct vb2_ops.
2963 * They contain boilerplate code that most if not all drivers have to do
2964 * and so they simplify the driver code.
2965 */
2966
2967/* The queue is busy if there is a owner and you are not that owner. */
2968static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
2969{
2970 return vdev->queue->owner && vdev->queue->owner != file->private_data;
2971}
2972
2973/* vb2 ioctl helpers */
2974
2975int vb2_ioctl_reqbufs(struct file *file, void *priv,
2976 struct v4l2_requestbuffers *p)
2977{
2978 struct video_device *vdev = video_devdata(file);
2979 int res = __verify_memory_type(vdev->queue, p->memory, p->type);
2980
2981 if (res)
2982 return res;
2983 if (vb2_queue_is_busy(vdev, file))
2984 return -EBUSY;
2985 res = __reqbufs(vdev->queue, p);
2986 /* If count == 0, then the owner has released all buffers and he
2987 is no longer owner of the queue. Otherwise we have a new owner. */
2988 if (res == 0)
2989 vdev->queue->owner = p->count ? file->private_data : NULL;
2990 return res;
2991}
2992EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
2993
2994int vb2_ioctl_create_bufs(struct file *file, void *priv,
2995 struct v4l2_create_buffers *p)
2996{
2997 struct video_device *vdev = video_devdata(file);
2998 int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
2999
3000 p->index = vdev->queue->num_buffers;
3001 /* If count == 0, then just check if memory and type are valid.
3002 Any -EBUSY result from __verify_memory_type can be mapped to 0. */
3003 if (p->count == 0)
3004 return res != -EBUSY ? res : 0;
3005 if (res)
3006 return res;
3007 if (vb2_queue_is_busy(vdev, file))
3008 return -EBUSY;
3009 res = __create_bufs(vdev->queue, p);
3010 if (res == 0)
3011 vdev->queue->owner = file->private_data;
3012 return res;
3013}
3014EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
3015
3016int vb2_ioctl_prepare_buf(struct file *file, void *priv,
3017 struct v4l2_buffer *p)
3018{
3019 struct video_device *vdev = video_devdata(file);
3020
3021 if (vb2_queue_is_busy(vdev, file))
3022 return -EBUSY;
3023 return vb2_prepare_buf(vdev->queue, p);
3024}
3025EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
3026
3027int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
3028{
3029 struct video_device *vdev = video_devdata(file);
3030
3031 /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
3032 return vb2_querybuf(vdev->queue, p);
3033}
3034EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
3035
3036int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3037{
3038 struct video_device *vdev = video_devdata(file);
3039
3040 if (vb2_queue_is_busy(vdev, file))
3041 return -EBUSY;
3042 return vb2_qbuf(vdev->queue, p);
3043}
3044EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
3045
3046int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3047{
3048 struct video_device *vdev = video_devdata(file);
3049
3050 if (vb2_queue_is_busy(vdev, file))
3051 return -EBUSY;
3052 return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
3053}
3054EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
3055
3056int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
3057{
3058 struct video_device *vdev = video_devdata(file);
3059
3060 if (vb2_queue_is_busy(vdev, file))
3061 return -EBUSY;
3062 return vb2_streamon(vdev->queue, i);
3063}
3064EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
3065
3066int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
3067{
3068 struct video_device *vdev = video_devdata(file);
3069
3070 if (vb2_queue_is_busy(vdev, file))
3071 return -EBUSY;
3072 return vb2_streamoff(vdev->queue, i);
3073}
3074EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
3075
83ae7c5a
TS
3076int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
3077{
3078 struct video_device *vdev = video_devdata(file);
3079
3080 if (vb2_queue_is_busy(vdev, file))
3081 return -EBUSY;
3082 return vb2_expbuf(vdev->queue, p);
3083}
3084EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
3085
4c1ffcaa
HV
3086/* v4l2_file_operations helpers */
3087
3088int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
3089{
3090 struct video_device *vdev = video_devdata(file);
8a90f1a6
LP
3091 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3092 int err;
4c1ffcaa 3093
8a90f1a6
LP
3094 if (lock && mutex_lock_interruptible(lock))
3095 return -ERESTARTSYS;
3096 err = vb2_mmap(vdev->queue, vma);
3097 if (lock)
3098 mutex_unlock(lock);
3099 return err;
4c1ffcaa
HV
3100}
3101EXPORT_SYMBOL_GPL(vb2_fop_mmap);
3102
1380f575 3103int _vb2_fop_release(struct file *file, struct mutex *lock)
4c1ffcaa
HV
3104{
3105 struct video_device *vdev = video_devdata(file);
3106
3107 if (file->private_data == vdev->queue->owner) {
1380f575
RR
3108 if (lock)
3109 mutex_lock(lock);
4c1ffcaa
HV
3110 vb2_queue_release(vdev->queue);
3111 vdev->queue->owner = NULL;
1380f575
RR
3112 if (lock)
3113 mutex_unlock(lock);
4c1ffcaa
HV
3114 }
3115 return v4l2_fh_release(file);
3116}
1380f575
RR
3117EXPORT_SYMBOL_GPL(_vb2_fop_release);
3118
3119int vb2_fop_release(struct file *file)
3120{
3121 struct video_device *vdev = video_devdata(file);
3122 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3123
3124 return _vb2_fop_release(file, lock);
3125}
4c1ffcaa
HV
3126EXPORT_SYMBOL_GPL(vb2_fop_release);
3127
819585bc 3128ssize_t vb2_fop_write(struct file *file, const char __user *buf,
4c1ffcaa
HV
3129 size_t count, loff_t *ppos)
3130{
3131 struct video_device *vdev = video_devdata(file);
3132 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
4c1ffcaa
HV
3133 int err = -EBUSY;
3134
cf533735 3135 if (lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
3136 return -ERESTARTSYS;
3137 if (vb2_queue_is_busy(vdev, file))
3138 goto exit;
3139 err = vb2_write(vdev->queue, buf, count, ppos,
3140 file->f_flags & O_NONBLOCK);
8c82c75c 3141 if (vdev->queue->fileio)
4c1ffcaa
HV
3142 vdev->queue->owner = file->private_data;
3143exit:
cf533735 3144 if (lock)
4c1ffcaa
HV
3145 mutex_unlock(lock);
3146 return err;
3147}
3148EXPORT_SYMBOL_GPL(vb2_fop_write);
3149
3150ssize_t vb2_fop_read(struct file *file, char __user *buf,
3151 size_t count, loff_t *ppos)
3152{
3153 struct video_device *vdev = video_devdata(file);
3154 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
4c1ffcaa
HV
3155 int err = -EBUSY;
3156
cf533735 3157 if (lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
3158 return -ERESTARTSYS;
3159 if (vb2_queue_is_busy(vdev, file))
3160 goto exit;
3161 err = vb2_read(vdev->queue, buf, count, ppos,
3162 file->f_flags & O_NONBLOCK);
8c82c75c 3163 if (vdev->queue->fileio)
4c1ffcaa
HV
3164 vdev->queue->owner = file->private_data;
3165exit:
cf533735 3166 if (lock)
4c1ffcaa
HV
3167 mutex_unlock(lock);
3168 return err;
3169}
3170EXPORT_SYMBOL_GPL(vb2_fop_read);
3171
3172unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
3173{
3174 struct video_device *vdev = video_devdata(file);
3175 struct vb2_queue *q = vdev->queue;
3176 struct mutex *lock = q->lock ? q->lock : vdev->lock;
3177 unsigned long req_events = poll_requested_events(wait);
3178 unsigned res;
3179 void *fileio;
4c1ffcaa
HV
3180 bool must_lock = false;
3181
3182 /* Try to be smart: only lock if polling might start fileio,
3183 otherwise locking will only introduce unwanted delays. */
3184 if (q->num_buffers == 0 && q->fileio == NULL) {
3185 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
3186 (req_events & (POLLIN | POLLRDNORM)))
3187 must_lock = true;
3188 else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
3189 (req_events & (POLLOUT | POLLWRNORM)))
3190 must_lock = true;
3191 }
3192
3193 /* If locking is needed, but this helper doesn't know how, then you
3194 shouldn't be using this helper but you should write your own. */
cf533735 3195 WARN_ON(must_lock && !lock);
4c1ffcaa 3196
cf533735 3197 if (must_lock && lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
3198 return POLLERR;
3199
3200 fileio = q->fileio;
3201
3202 res = vb2_poll(vdev->queue, file, wait);
3203
3204 /* If fileio was started, then we have a new queue owner. */
3205 if (must_lock && !fileio && q->fileio)
3206 q->owner = file->private_data;
cf533735 3207 if (must_lock && lock)
4c1ffcaa
HV
3208 mutex_unlock(lock);
3209 return res;
3210}
3211EXPORT_SYMBOL_GPL(vb2_fop_poll);
3212
3213#ifndef CONFIG_MMU
3214unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
3215 unsigned long len, unsigned long pgoff, unsigned long flags)
3216{
3217 struct video_device *vdev = video_devdata(file);
8a90f1a6
LP
3218 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3219 int ret;
4c1ffcaa 3220
8a90f1a6
LP
3221 if (lock && mutex_lock_interruptible(lock))
3222 return -ERESTARTSYS;
3223 ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
3224 if (lock)
3225 mutex_unlock(lock);
3226 return ret;
4c1ffcaa
HV
3227}
3228EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
3229#endif
3230
3231/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
3232
3233void vb2_ops_wait_prepare(struct vb2_queue *vq)
3234{
3235 mutex_unlock(vq->lock);
3236}
3237EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
3238
3239void vb2_ops_wait_finish(struct vb2_queue *vq)
3240{
3241 mutex_lock(vq->lock);
3242}
3243EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
3244
e23ccc0a 3245MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
95072084 3246MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
e23ccc0a 3247MODULE_LICENSE("GPL");
This page took 0.44793 seconds and 5 git commands to generate.