/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)

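/*
 * Illustrative sketch (not part of this file): userspace never sees the
 * base directly, but the offset returned by QUERYBUF tells the two queues
 * apart when both are mapped through the same file descriptor, e.g.:
 *
 *	src = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, src_offset);	// src_offset < DST_QUEUE_OFF_BASE
 *	dst = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, dst_offset);	// dst_offset >= DST_QUEUE_OFF_BASE
 *
 * v4l2_m2m_querybuf() adds the base to capture offsets and v4l2_m2m_mmap()
 * subtracts it again before calling vb2_mmap().
 */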

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

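/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * device_run() callback typically peeks at the next ready buffer on each
 * queue and programs the hardware with the pair:
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *
 *		// program DMA addresses and start the hardware; the
 *		// buffers are removed later, in the interrupt handler.
 *	}
 */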
/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx: m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction; see the sketch after this function.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

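/*
 * Illustrative sketch (hypothetical driver, not part of this file): an
 * instance that needs two source buffers per transaction would gate
 * scheduling with a custom job_ready() callback:
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		// ready only once two source buffers are queued
 *		return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2;
 *	}
 */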
/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * Called on streamoff or release of a context:
 * 1) if the context is currently running, the driver's job_abort() is
 *    invoked and we wait for the job to finish;
 * 2) if the context is queued, it is removed from the job_queue.
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has
 * been called on the driver. To prevent recursion, it should not be called
 * directly from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

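/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * natural place to call v4l2_m2m_job_finish() is the interrupt handler,
 * once the finished transaction's buffers have been marked done:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *		vb2_buffer_done(src, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */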
/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement for m2m source and destination queues
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this signals
 * that a non-blocking write can be performed; a dequeueable buffer on the
 * destination queue signals a non-blocking read.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
	else if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued)
			return rc | POLLIN | POLLRDNORM;

		poll_wait(file, &dst_q->done_wq, wait);
	}

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
	else if (m2m_ctx->q_lock) {
		if (mutex_lock_interruptible(m2m_ctx->q_lock)) {
			rc |= POLLERR;
			goto end;
		}
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
		       || src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
		       || dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from the driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for the videobuf2 layer, which receives normal per-queue offsets
 * and proper vb2 queue pointers. The differentiation is made outside
 * videobuf2 by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to videobuf2. Only drivers (and
 * thus applications) ever see the modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
	    WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

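/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,
 *		.job_abort	= my_job_abort,
 *	};
 *
 *	// in probe():
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */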
/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev: a previously initialized m2m_dev struct
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function
 *	to be used for initializing vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * If both queues use same mutex assign it as the common buffer
	 * queues lock to the m2m context. This lock is used in the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

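/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * typical open() handler hands both queues to the framework through a
 * queue_init callback:
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		int ret;
 *
 *		// fill in type, io_modes, ops, mem_ops, drv_priv and
 *		// (ideally one shared) lock on both queues, then:
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	// in open():
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->m2m_ctx))
 *		return PTR_ERR(ctx->m2m_ctx);
 */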
/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the vb2_ops buf_queue() callback; see the sketch after this
 * function.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

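/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 *	}
 */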
/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
			    struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
			struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
			  struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
			     enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

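/*
 * Illustrative sketch (not part of this file): with the helpers above and
 * a struct v4l2_fh with a valid m2m_ctx in file->private_data, a driver's
 * ioctl table reduces to:
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */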
/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	int ret;

	if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
		return -ERESTARTSYS;

	ret = v4l2_m2m_mmap(file, m2m_ctx, vma);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	unsigned int ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);

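/*
 * Illustrative sketch (not part of this file): the corresponding file
 * operations, again assuming a v4l2_fh-based driver:
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */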