/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf2.
 *
 * Helper functions for devices that use videobuf2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf2");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);
#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	struct v4l2_m2m_ops	*m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - remove a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
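
/*
 * Example (not part of this file): a minimal device_run() sketch for a
 * hypothetical driver. It peeks at the next ready source and destination
 * buffers and kicks off the hardware; the buffers are only removed from the
 * ready lists once the job completes. The hypo_* names and the
 * v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf() wrappers (which call
 * v4l2_m2m_next_buf() on out_q_ctx/cap_q_ctx, see <media/v4l2-mem2mem.h>)
 * are assumptions of this sketch.
 *
 *	static void hypo_device_run(void *priv)
 *	{
 *		struct hypo_ctx *ctx = priv;
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *
 *		hypo_hw_start(ctx->dev, src, dst);
 *	}
 */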

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_entry(m2m_dev->job_queue.next,
				       struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so
 * @m2m_ctx: m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction; see the sketch after this function.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
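
/*
 * Example (not part of this file): a hypothetical job_ready() callback for a
 * driver that needs two source buffers per transaction, as mentioned above.
 * The hypo_* names and the v4l2_m2m_num_src_bufs_ready() helper (which reads
 * out_q_ctx.num_rdy, see <media/v4l2-mem2mem.h>) are assumptions of this
 * sketch.
 *
 *	static int hypo_job_ready(void *priv)
 *	{
 *		struct hypo_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2;
 *	}
 */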

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
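
/*
 * Example (not part of this file): a sketch of how a hypothetical driver's
 * interrupt handler could complete a job. It looks up the running instance
 * with v4l2_m2m_get_curr_priv(), removes the processed buffers from the
 * ready lists, returns them to videobuf2 and only then yields the device
 * with v4l2_m2m_job_finish(). The hypo_* names and the
 * v4l2_m2m_src_buf_remove()/v4l2_m2m_dst_buf_remove() wrappers are
 * assumptions of this sketch.
 *
 *	static irqreturn_t hypo_irq(int irq, void *dev_id)
 *	{
 *		struct hypo_dev *dev = dev_id;
 *		struct hypo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_buffer *src, *dst;
 *
 *		if (!ctx)
 *			return IRQ_HANDLED;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *		vb2_buffer_done(src, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
 *
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */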

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	return vb2_streamoff(vq, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement, for m2m devices with both queues
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) on the source queue, POLLOUT |
 * POLLWRNORM is reported to indicate that a non-blocking write is possible;
 * a dequeueable buffer on the destination queue is reported as POLLIN |
 * POLLRDNORM, i.e. a non-blocking read.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc = POLLERR;
		goto end;
	}

	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
		       || src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
		       || dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
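
/*
 * Example (not part of this file): a hypothetical driver's poll() file
 * operation can simply delegate to v4l2_m2m_poll(). Storing the context in
 * file->private_data is an assumption of this sketch.
 *
 *	static unsigned int hypo_poll(struct file *file,
 *				      struct poll_table_struct *wait)
 *	{
 *		struct hypo_ctx *ctx = file->private_data;
 *
 *		return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
 *	}
 */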

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from the driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for the videobuf2 layer, which will receive normal per-queue
 * offsets and proper vb2 queue pointers. The differentiation is made outside
 * videobuf2 by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to videobuf2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
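
/*
 * Example (not part of this file): the matching mmap() file operation for a
 * hypothetical driver. Userspace mmap()s whatever offset QUERYBUF returned;
 * CAPTURE offsets already carry DST_QUEUE_OFF_BASE, which v4l2_m2m_mmap()
 * strips again before calling into videobuf2. The hypo_* names and the use
 * of file->private_data are assumptions of this sketch.
 *
 *	static int hypo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct hypo_ctx *ctx = file->private_data;
 *
 *		return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
 *	}
 */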

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops)
		return ERR_PTR(-EINVAL);

	BUG_ON(!m2m_ops->device_run);
	BUG_ON(!m2m_ops->job_abort);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
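
/*
 * Example (not part of this file): probe-time setup for a hypothetical
 * driver. device_run and job_abort are the only mandatory callbacks, as the
 * BUG_ON()s above enforce. The hypo_* names are assumptions of this sketch.
 *
 *	static struct v4l2_m2m_ops hypo_m2m_ops = {
 *		.device_run	= hypo_device_run,
 *		.job_ready	= hypo_job_ready,
 *		.job_abort	= hypo_job_abort,
 *	};
 *
 *	Then, in probe():
 *
 *	dev->m2m_dev = v4l2_m2m_init(&hypo_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */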

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev: a previously initialized m2m_dev struct
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function to
 * be used for initializing vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
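
/*
 * Example (not part of this file): a queue_init callback and open()-time
 * context setup for a hypothetical driver using the dma-contig allocator.
 * The hypo_* names, the use of file->private_data and the choice of memops
 * are assumptions of this sketch; buf_struct_size must cover
 * struct v4l2_m2m_buffer so that v4l2_m2m_buf_queue() below can be used.
 *
 *	static int hypo_queue_init(void *priv, struct vb2_queue *src_vq,
 *				   struct vb2_queue *dst_vq)
 *	{
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP;
 *		src_vq->drv_priv = priv;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->ops = &hypo_qops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		dst_vq->io_modes = VB2_MMAP;
 *		dst_vq->drv_priv = priv;
 *		dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		dst_vq->ops = &hypo_qops;
 *		dst_vq->mem_ops = &vb2_dma_contig_memops;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	Then, in open():
 *
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, hypo_queue_init);
 *	if (IS_ERR(ctx->m2m_ctx))
 *		return PTR_ERR(ctx->m2m_ctx);
 */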

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the vb2 buf_queue() callback in struct vb2_ops.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
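
/*
 * Example (not part of this file): a hypothetical driver's vb2 buf_queue op
 * simply forwards to v4l2_m2m_buf_queue(). The hypo_* names are assumptions
 * of this sketch; vb2_get_drv_priv() returns the drv_priv set in queue_init.
 *
 *	static void hypo_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct hypo_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 *	}
 */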