[media] omap3isp: Move CCDC link validation to ccdc_link_validate()
drivers/media/video/omap3isp/ispvideo.c
1/*
2 * ispvideo.c
3 *
4 * TI OMAP3 ISP - Generic video node
5 *
6 * Copyright (C) 2009-2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <asm/cacheflush.h>
27#include <linux/clk.h>
28#include <linux/mm.h>
29#include <linux/module.h>
30#include <linux/pagemap.h>
31#include <linux/scatterlist.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/vmalloc.h>
35#include <media/v4l2-dev.h>
36#include <media/v4l2-ioctl.h>
37#include <plat/iommu.h>
38#include <plat/iovmm.h>
39#include <plat/omap-pm.h>
40
41#include "ispvideo.h"
42#include "isp.h"
43
44
45/* -----------------------------------------------------------------------------
46 * Helper functions
47 */
48
49/*
50 * NOTE: When adding new media bus codes, always remember to add
51 * corresponding in-memory formats to the table below!!!
52 */
53static struct isp_format_info formats[] = {
54 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
55 V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
56 V4L2_PIX_FMT_GREY, 8, },
57 { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
58 V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
59 V4L2_PIX_FMT_Y10, 10, },
60 { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
61 V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
62 V4L2_PIX_FMT_Y12, 12, },
63 { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
64 V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
65 V4L2_PIX_FMT_SBGGR8, 8, },
66 { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
67 V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
68 V4L2_PIX_FMT_SGBRG8, 8, },
69 { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
70 V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
71 V4L2_PIX_FMT_SGRBG8, 8, },
72 { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
73 V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
74 V4L2_PIX_FMT_SRGGB8, 8, },
75 { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
76 V4L2_MBUS_FMT_SBGGR10_1X10, 0,
77 V4L2_PIX_FMT_SBGGR10DPCM8, 8, },
78 { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
79 V4L2_MBUS_FMT_SGBRG10_1X10, 0,
80 V4L2_PIX_FMT_SGBRG10DPCM8, 8, },
81 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
82 V4L2_MBUS_FMT_SGRBG10_1X10, 0,
83 V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
84 { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
85 V4L2_MBUS_FMT_SRGGB10_1X10, 0,
86 V4L2_PIX_FMT_SRGGB10DPCM8, 8, },
87 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
88 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
89 V4L2_PIX_FMT_SBGGR10, 10, },
90 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
91 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
92 V4L2_PIX_FMT_SGBRG10, 10, },
93 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
94 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
95 V4L2_PIX_FMT_SGRBG10, 10, },
96 { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
97 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
98 V4L2_PIX_FMT_SRGGB10, 10, },
99 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
100 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
101 V4L2_PIX_FMT_SBGGR12, 12, },
102 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
103 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
104 V4L2_PIX_FMT_SGBRG12, 12, },
105 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
106 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
107 V4L2_PIX_FMT_SGRBG12, 12, },
108 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
109 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
110 V4L2_PIX_FMT_SRGGB12, 12, },
111 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
112 V4L2_MBUS_FMT_UYVY8_1X16, 0,
113 V4L2_PIX_FMT_UYVY, 16, },
114 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
115 V4L2_MBUS_FMT_YUYV8_1X16, 0,
116 V4L2_PIX_FMT_YUYV, 16, },
117};
118
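/*
 * omap3isp_video_format_info - Look up the format description for a media bus code
 * @code: media bus format code
 *
 * Return a pointer to the matching entry in the formats table, or NULL if the
 * code isn't supported.
 */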
119const struct isp_format_info *
120omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
121{
122 unsigned int i;
123
124 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
125 if (formats[i].code == code)
126 return &formats[i];
127 }
128
129 return NULL;
130}
131
132/*
133 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
134 * @video: ISP video instance
135 * @mbus: v4l2_mbus_framefmt format (input)
136 * @pix: v4l2_pix_format format (output)
137 *
138 * Fill the output pix structure with information from the input mbus format.
139 * The bytesperline and sizeimage fields are computed from the requested bytes
140 * per line value in the pix format and information from the video instance.
141 *
142 * Return the number of padding bytes at end of line.
143 */
144static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
145 const struct v4l2_mbus_framefmt *mbus,
146 struct v4l2_pix_format *pix)
147{
148 unsigned int bpl = pix->bytesperline;
149 unsigned int min_bpl;
150 unsigned int i;
151
152 memset(pix, 0, sizeof(*pix));
153 pix->width = mbus->width;
154 pix->height = mbus->height;
155
156 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
157 if (formats[i].code == mbus->code)
158 break;
159 }
160
161 if (WARN_ON(i == ARRAY_SIZE(formats)))
162 return 0;
163
164 min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;
165
166 /* Clamp the requested bytes per line value. If the maximum bytes per
167 * line value is zero, the module doesn't support user configurable line
168 * sizes. Override the requested value with the minimum in that case.
169 */
170 if (video->bpl_max)
171 bpl = clamp(bpl, min_bpl, video->bpl_max);
172 else
173 bpl = min_bpl;
174
175 if (!video->bpl_zero_padding || bpl != min_bpl)
176 bpl = ALIGN(bpl, video->bpl_alignment);
177
178 pix->pixelformat = formats[i].pixelformat;
179 pix->bytesperline = bpl;
180 pix->sizeimage = pix->bytesperline * pix->height;
181 pix->colorspace = mbus->colorspace;
182 pix->field = mbus->field;
183
184 return bpl - min_bpl;
185}
186
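/*
 * isp_video_pix_to_mbus - Convert v4l2_pix_format to v4l2_mbus_framefmt
 * @pix: v4l2_pix_format format (input)
 * @mbus: v4l2_mbus_framefmt format (output)
 *
 * Fill the output mbus structure with information from the input pix format.
 * The media bus code is selected by pixel format; the last entry of the
 * formats table is used as a fallback when no match is found.
 */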
187static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
188 struct v4l2_mbus_framefmt *mbus)
189{
190 unsigned int i;
191
192 memset(mbus, 0, sizeof(*mbus));
193 mbus->width = pix->width;
194 mbus->height = pix->height;
195
196 /* Skip the last format in the loop so that it will be selected if no
197 * match is found.
198 */
199 for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
200 if (formats[i].pixelformat == pix->pixelformat)
201 break;
202 }
203
204 mbus->code = formats[i].code;
205 mbus->colorspace = pix->colorspace;
206 mbus->field = pix->field;
207}
208
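/*
 * isp_video_remote_subdev - Return the subdevice connected to a video node
 * @video: ISP video instance
 * @pad: optional pointer filled with the index of the remote pad
 *
 * Return the remote V4L2 subdevice, or NULL if the video node isn't directly
 * connected to a subdevice.
 */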
209static struct v4l2_subdev *
210isp_video_remote_subdev(struct isp_video *video, u32 *pad)
211{
212 struct media_pad *remote;
213
214 remote = media_entity_remote_source(&video->pad);
215
216 if (remote == NULL ||
217 media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
218 return NULL;
219
220 if (pad)
221 *pad = remote->index;
222
223 return media_entity_to_v4l2_subdev(remote->entity);
224}
225
226/* Walk the pipeline graph to record its entities and its input and output video nodes. */
227static int isp_video_get_graph_data(struct isp_video *video,
228 struct isp_pipeline *pipe)
229{
230 struct media_entity_graph graph;
231 struct media_entity *entity = &video->video.entity;
232 struct media_device *mdev = entity->parent;
233 struct isp_video *far_end = NULL;
234
235 mutex_lock(&mdev->graph_mutex);
236 media_entity_graph_walk_start(&graph, entity);
237
238 while ((entity = media_entity_graph_walk_next(&graph))) {
239 struct isp_video *__video;
240
241 pipe->entities |= 1 << entity->id;
242
243 if (far_end != NULL)
244 continue;
245
246 if (entity == &video->video.entity)
247 continue;
248
249 if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
250 continue;
251
252 __video = to_isp_video(media_entity_to_video_device(entity));
253 if (__video->type != video->type)
254 far_end = __video;
255 }
256
257 mutex_unlock(&mdev->graph_mutex);
258
259 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
260 pipe->input = far_end;
261 pipe->output = video;
262 } else {
263 if (far_end == NULL)
264 return -EPIPE;
265
266 pipe->input = video;
267 pipe->output = far_end;
268 }
269
270 return 0;
271}
272
273/*
274 * Validate a pipeline by walking it from the output video node towards its
275 * source.
276 *
277 * Update the pipeline maximum pixel rate with the limit reported by the
278 * resizer when it is part of the pipeline.
279 *
280 * Return 0 on success, or -EPIPE if the output video node isn't connected to
281 * a subdev, or if a sink pad in the pipeline isn't connected to a source pad.
282 *
283 */
284static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
285{
286 struct isp_device *isp = pipe->output->isp;
287 struct media_pad *pad;
288 struct v4l2_subdev *subdev;
289
290 subdev = isp_video_remote_subdev(pipe->output, NULL);
291 if (subdev == NULL)
292 return -EPIPE;
293
294 while (1) {
295 /* Retrieve the sink format */
296 pad = &subdev->entity.pads[0];
297 if (!(pad->flags & MEDIA_PAD_FL_SINK))
298 break;
299
300 /* Update the maximum frame rate */
301 if (subdev == &isp->isp_res.subdev)
302 omap3isp_resizer_max_rate(&isp->isp_res,
303 &pipe->max_rate);
304
305 /* Retrieve the source format. Return an error if no source
306 * entity can be found, and stop checking the pipeline if the
307 * source entity isn't a subdev.
308 */
309 pad = media_entity_remote_source(pad);
310 if (pad == NULL)
311 return -EPIPE;
312
313 if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
314 break;
315
316 subdev = media_entity_to_v4l2_subdev(pad->entity);
317 }
318
319 return 0;
320}
321
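/*
 * __isp_video_get_format - Retrieve the current format at the video node input
 * @video: ISP video instance
 * @format: V4L2 format structure to be filled
 *
 * Query the active format on the remote subdev pad connected to the video node
 * and convert it to a V4L2 pixel format.
 */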
322static int
323__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
324{
325 struct v4l2_subdev_format fmt;
326 struct v4l2_subdev *subdev;
327 u32 pad;
328 int ret;
329
330 subdev = isp_video_remote_subdev(video, &pad);
331 if (subdev == NULL)
332 return -EINVAL;
333
334 mutex_lock(&video->mutex);
335
336 fmt.pad = pad;
337 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
338 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
339 if (ret == -ENOIOCTLCMD)
340 ret = -EINVAL;
341
342 mutex_unlock(&video->mutex);
343
344 if (ret)
345 return ret;
346
347 format->type = video->type;
348 return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
349}
350
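/*
 * isp_video_check_format - Check that the file handle format matches the
 * format at the output of the connected subdev
 * @video: ISP video instance
 * @vfh: ISP video file handle
 *
 * Return the number of padding bytes at end of line on success, a negative
 * error code if the remote format can't be retrieved, or -EINVAL if the two
 * formats differ.
 */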
351static int
352isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
353{
354 struct v4l2_format format;
355 int ret;
356
357 memcpy(&format, &vfh->format, sizeof(format));
358 ret = __isp_video_get_format(video, &format);
359 if (ret < 0)
360 return ret;
361
362 if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
363 vfh->format.fmt.pix.height != format.fmt.pix.height ||
364 vfh->format.fmt.pix.width != format.fmt.pix.width ||
365 vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
366 vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
367 return -EINVAL;
368
369 return ret;
370}
371
372/* -----------------------------------------------------------------------------
373 * IOMMU management
374 */
375
376#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
377
378/*
379 * ispmmu_vmap - Map a scatter-gather list through the ISP MMU
380 * @isp: Pointer to the OMAP3 ISP device.
381 * @sglist: Pointer to the source scatter-gather list to map.
382 * @sglen: Number of elements in the scatter-gather list.
383 *
384 * Return the device address at which the list has been mapped by the ISP MMU,
385 * or -ENOMEM if we ran out of memory.
386 */
387static dma_addr_t
388ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
389{
390 struct sg_table *sgt;
391 u32 da;
392
393 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
394 if (sgt == NULL)
395 return -ENOMEM;
396
397 sgt->sgl = (struct scatterlist *)sglist;
398 sgt->nents = sglen;
399 sgt->orig_nents = sglen;
400
401 da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
402 if (IS_ERR_VALUE(da))
403 kfree(sgt);
404
405 return da;
406}
407
408/*
409 * ispmmu_vunmap - Unmap a device address from the ISP MMU
410 * @isp: Pointer to the OMAP3 ISP device.
411 * @da: Device address generated by an ispmmu_vmap() call.
412 */
413static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
414{
415 struct sg_table *sgt;
416
417 sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
418 kfree(sgt);
419}
420
421/* -----------------------------------------------------------------------------
422 * Video queue operations
423 */
424
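/*
 * isp_video_queue_prepare - Compute buffer count and size for the video queue
 * @queue: ISP video buffer queue
 * @nbuffers: requested number of buffers, clamped on return
 * @size: buffer size in bytes, filled on return
 *
 * Limit the number of buffers so that the total amount of mapped memory
 * doesn't exceed the capture memory limit of the video node.
 */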
425static void isp_video_queue_prepare(struct isp_video_queue *queue,
426 unsigned int *nbuffers, unsigned int *size)
427{
428 struct isp_video_fh *vfh =
429 container_of(queue, struct isp_video_fh, queue);
430 struct isp_video *video = vfh->video;
431
432 *size = vfh->format.fmt.pix.sizeimage;
433 if (*size == 0)
434 return;
435
436 *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
437}
438
439static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
440{
441 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
442 struct isp_buffer *buffer = to_isp_buffer(buf);
443 struct isp_video *video = vfh->video;
444
445 if (buffer->isp_addr) {
446 ispmmu_vunmap(video->isp, buffer->isp_addr);
447 buffer->isp_addr = 0;
448 }
449}
450
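/*
 * isp_video_buffer_prepare - Map a buffer through the ISP MMU
 * @buf: ISP video buffer
 *
 * Map the buffer scatter-gather list through the ISP MMU and verify that the
 * resulting address is aligned on a 32 bytes boundary as required by the
 * hardware.
 */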
451static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
452{
453 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
454 struct isp_buffer *buffer = to_isp_buffer(buf);
455 struct isp_video *video = vfh->video;
456 unsigned long addr;
457
458 addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
459 if (IS_ERR_VALUE(addr))
460 return -EIO;
461
462 if (!IS_ALIGNED(addr, 32)) {
463 dev_dbg(video->isp->dev, "Buffer address must be "
464 "aligned to 32 bytes boundary.\n");
465 ispmmu_vunmap(video->isp, buffer->isp_addr);
466 return -EINVAL;
467 }
468
469 buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
470 buffer->isp_addr = addr;
471 return 0;
472}
473
474/*
475 * isp_video_buffer_queue - Add buffer to streaming queue
476 * @buf: Video buffer
477 *
478 * In memory-to-memory mode, start streaming on the pipeline if buffers are
479 * queued on both the input and the output, if the pipeline isn't already busy.
480 * If the pipeline is busy, it will be restarted in the output module interrupt
481 * handler.
482 */
483static void isp_video_buffer_queue(struct isp_video_buffer *buf)
484{
485 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
486 struct isp_buffer *buffer = to_isp_buffer(buf);
487 struct isp_video *video = vfh->video;
488 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
489 enum isp_pipeline_state state;
490 unsigned long flags;
491 unsigned int empty;
492 unsigned int start;
493
494 empty = list_empty(&video->dmaqueue);
495 list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
496
497 if (empty) {
498 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
499 state = ISP_PIPELINE_QUEUE_OUTPUT;
500 else
501 state = ISP_PIPELINE_QUEUE_INPUT;
502
503 spin_lock_irqsave(&pipe->lock, flags);
504 pipe->state |= state;
505 video->ops->queue(video, buffer);
506 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
507
508 start = isp_pipeline_ready(pipe);
509 if (start)
510 pipe->state |= ISP_PIPELINE_STREAM;
511 spin_unlock_irqrestore(&pipe->lock, flags);
512
513 if (start)
514 omap3isp_pipeline_set_stream(pipe,
515 ISP_PIPELINE_STREAM_SINGLESHOT);
516 }
517}
518
519static const struct isp_video_queue_operations isp_video_queue_ops = {
520 .queue_prepare = &isp_video_queue_prepare,
521 .buffer_prepare = &isp_video_buffer_prepare,
522 .buffer_queue = &isp_video_buffer_queue,
523 .buffer_cleanup = &isp_video_buffer_cleanup,
524};
525
526/*
527 * omap3isp_video_buffer_next - Complete the current buffer and return the next
528 * @video: ISP video object
529 *
530 * Remove the current video buffer from the DMA queue and fill its timestamp,
531 * field count and state fields before waking up its completion handler.
532 *
533 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
534 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
535 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
536 *
537 * The DMA queue is expected to contain at least one buffer.
538 *
539 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
540 * empty.
541 */
542struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
543{
544 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
545 struct isp_video_queue *queue = video->queue;
546 enum isp_pipeline_state state;
547 struct isp_video_buffer *buf;
548 unsigned long flags;
549 struct timespec ts;
550
551 spin_lock_irqsave(&queue->irqlock, flags);
552 if (WARN_ON(list_empty(&video->dmaqueue))) {
553 spin_unlock_irqrestore(&queue->irqlock, flags);
554 return NULL;
555 }
556
557 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
558 irqlist);
559 list_del(&buf->irqlist);
560 spin_unlock_irqrestore(&queue->irqlock, flags);
561
562 ktime_get_ts(&ts);
563 buf->vbuf.timestamp.tv_sec = ts.tv_sec;
564 buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
565
566 /* Do frame number propagation only if this is the output video node.
567 * Frame number either comes from the CSI receivers or it gets
568 * incremented here if H3A is not active.
569 * Note: There is no guarantee that the output buffer will finish
570 * first, so the input number might lag behind by 1 in some cases.
571 */
572 if (video == pipe->output && !pipe->do_propagation)
573 buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
574 else
575 buf->vbuf.sequence = atomic_read(&pipe->frame_number);
576
577 /* Report pipeline errors to userspace on the capture device side. */
578 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
579 buf->state = ISP_BUF_STATE_ERROR;
580 pipe->error = false;
581 } else {
582 buf->state = ISP_BUF_STATE_DONE;
583 }
584
585 wake_up(&buf->wait);
586
587 if (list_empty(&video->dmaqueue)) {
588 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
589 state = ISP_PIPELINE_QUEUE_OUTPUT
590 | ISP_PIPELINE_STREAM;
591 else
592 state = ISP_PIPELINE_QUEUE_INPUT
593 | ISP_PIPELINE_STREAM;
594
595 spin_lock_irqsave(&pipe->lock, flags);
596 pipe->state &= ~state;
597 if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
598 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
599 spin_unlock_irqrestore(&pipe->lock, flags);
600 return NULL;
601 }
602
603 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
604 spin_lock_irqsave(&pipe->lock, flags);
605 pipe->state &= ~ISP_PIPELINE_STREAM;
606 spin_unlock_irqrestore(&pipe->lock, flags);
607 }
608
609 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
610 irqlist);
611 buf->state = ISP_BUF_STATE_ACTIVE;
612 return to_isp_buffer(buf);
613}
614
615/*
616 * omap3isp_video_resume - Perform resume operation on the buffers
617 * @video: ISP video object
618 * @continuous: Pipeline is in single-shot mode if 0, or in continuous mode otherwise
619 *
620 * This function is intended to be used in suspend/resume scenarios. It
621 * requests the video queue layer to discard buffers marked as DONE if the
622 * pipeline is in continuous mode, and requests the ISP modules to requeue
623 * the ACTIVE buffer if there is one.
624 */
625void omap3isp_video_resume(struct isp_video *video, int continuous)
626{
627 struct isp_buffer *buf = NULL;
628
629 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
630 omap3isp_video_queue_discard_done(video->queue);
631
632 if (!list_empty(&video->dmaqueue)) {
633 buf = list_first_entry(&video->dmaqueue,
634 struct isp_buffer, buffer.irqlist);
635 video->ops->queue(video, buf);
636 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
637 } else {
638 if (continuous)
639 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
640 }
641}
642
643/* -----------------------------------------------------------------------------
644 * V4L2 ioctls
645 */
646
647static int
648isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
649{
650 struct isp_video *video = video_drvdata(file);
651
652 strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
653 strlcpy(cap->card, video->video.name, sizeof(cap->card));
654 strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
655
656 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
657 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
658 else
659 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
660
661 return 0;
662}
663
664static int
665isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
666{
667 struct isp_video_fh *vfh = to_isp_video_fh(fh);
668 struct isp_video *video = video_drvdata(file);
669
670 if (format->type != video->type)
671 return -EINVAL;
672
673 mutex_lock(&video->mutex);
674 *format = vfh->format;
675 mutex_unlock(&video->mutex);
676
677 return 0;
678}
679
680static int
681isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
682{
683 struct isp_video_fh *vfh = to_isp_video_fh(fh);
684 struct isp_video *video = video_drvdata(file);
685 struct v4l2_mbus_framefmt fmt;
686
687 if (format->type != video->type)
688 return -EINVAL;
689
690 mutex_lock(&video->mutex);
691
692 /* Fill the bytesperline and sizeimage fields by converting to media bus
693 * format and back to pixel format.
694 */
695 isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
696 isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
697
698 vfh->format = *format;
699
700 mutex_unlock(&video->mutex);
701 return 0;
702}
703
704static int
705isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
706{
707 struct isp_video *video = video_drvdata(file);
708 struct v4l2_subdev_format fmt;
709 struct v4l2_subdev *subdev;
710 u32 pad;
711 int ret;
712
713 if (format->type != video->type)
714 return -EINVAL;
715
716 subdev = isp_video_remote_subdev(video, &pad);
717 if (subdev == NULL)
718 return -EINVAL;
719
720 isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
721
722 fmt.pad = pad;
723 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
724 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
725 if (ret)
726 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
727
728 isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
729 return 0;
730}
731
732static int
733isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
734{
735 struct isp_video *video = video_drvdata(file);
736 struct v4l2_subdev *subdev;
737 int ret;
738
739 subdev = isp_video_remote_subdev(video, NULL);
740 if (subdev == NULL)
741 return -EINVAL;
742
743 mutex_lock(&video->mutex);
744 ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
745 mutex_unlock(&video->mutex);
746
747 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
748}
749
750static int
751isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
752{
753 struct isp_video *video = video_drvdata(file);
754 struct v4l2_subdev_format format;
755 struct v4l2_subdev *subdev;
756 u32 pad;
757 int ret;
758
759 subdev = isp_video_remote_subdev(video, &pad);
760 if (subdev == NULL)
761 return -EINVAL;
762
763 /* Try the get crop operation first and fallback to get format if not
764 * implemented.
765 */
766 ret = v4l2_subdev_call(subdev, video, g_crop, crop);
767 if (ret != -ENOIOCTLCMD)
768 return ret;
769
770 format.pad = pad;
771 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
772 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
773 if (ret < 0)
774 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
775
776 crop->c.left = 0;
777 crop->c.top = 0;
778 crop->c.width = format.format.width;
779 crop->c.height = format.format.height;
780
781 return 0;
782}
783
784static int
785isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
786{
787 struct isp_video *video = video_drvdata(file);
788 struct v4l2_subdev *subdev;
789 int ret;
790
791 subdev = isp_video_remote_subdev(video, NULL);
792 if (subdev == NULL)
793 return -EINVAL;
794
795 mutex_lock(&video->mutex);
796 ret = v4l2_subdev_call(subdev, video, s_crop, crop);
797 mutex_unlock(&video->mutex);
798
799 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
800}
801
802static int
803isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
804{
805 struct isp_video_fh *vfh = to_isp_video_fh(fh);
806 struct isp_video *video = video_drvdata(file);
807
808 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
809 video->type != a->type)
810 return -EINVAL;
811
812 memset(a, 0, sizeof(*a));
813 a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
814 a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
815 a->parm.output.timeperframe = vfh->timeperframe;
816
817 return 0;
818}
819
820static int
821isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
822{
823 struct isp_video_fh *vfh = to_isp_video_fh(fh);
824 struct isp_video *video = video_drvdata(file);
825
826 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
827 video->type != a->type)
828 return -EINVAL;
829
830 if (a->parm.output.timeperframe.denominator == 0)
831 a->parm.output.timeperframe.denominator = 1;
832
833 vfh->timeperframe = a->parm.output.timeperframe;
834
835 return 0;
836}
837
838static int
839isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
840{
841 struct isp_video_fh *vfh = to_isp_video_fh(fh);
842
843 return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
844}
845
846static int
847isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
848{
849 struct isp_video_fh *vfh = to_isp_video_fh(fh);
850
851 return omap3isp_video_queue_querybuf(&vfh->queue, b);
852}
853
854static int
855isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
856{
857 struct isp_video_fh *vfh = to_isp_video_fh(fh);
858
859 return omap3isp_video_queue_qbuf(&vfh->queue, b);
860}
861
862static int
863isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
864{
865 struct isp_video_fh *vfh = to_isp_video_fh(fh);
866
867 return omap3isp_video_queue_dqbuf(&vfh->queue, b,
868 file->f_flags & O_NONBLOCK);
869}
870
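/*
 * isp_video_check_external_subdevs - Perform sanity checks on the external
 * subdevice at the pipeline input
 * @video: ISP video instance
 * @pipe: ISP pipeline
 *
 * Locate the external subdevice connected to the first ISP entity in the
 * pipeline and store it in the pipeline, record the external bits per pixel
 * from the connected format, read the external pixel rate control, and verify
 * that the CCDC maximum pixel rate isn't exceeded.
 */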
871static int isp_video_check_external_subdevs(struct isp_video *video,
872 struct isp_pipeline *pipe)
873{
874 struct isp_device *isp = video->isp;
875 struct media_entity *ents[] = {
876 &isp->isp_csi2a.subdev.entity,
877 &isp->isp_csi2c.subdev.entity,
878 &isp->isp_ccp2.subdev.entity,
879 &isp->isp_ccdc.subdev.entity
880 };
881 struct media_pad *source_pad;
882 struct media_entity *source = NULL;
883 struct media_entity *sink;
884 struct v4l2_subdev_format fmt;
885 struct v4l2_ext_controls ctrls;
886 struct v4l2_ext_control ctrl;
887 unsigned int i;
888 int ret = 0;
889
890 for (i = 0; i < ARRAY_SIZE(ents); i++) {
891 /* Is the entity part of the pipeline? */
892 if (!(pipe->entities & (1 << ents[i]->id)))
893 continue;
894
895 /* ISP entities have always sink pad == 0. Find source. */
896 source_pad = media_entity_remote_source(&ents[i]->pads[0]);
897 if (source_pad == NULL)
898 continue;
899
900 source = source_pad->entity;
901 sink = ents[i];
902 break;
903 }
904
905 if (!source) {
906 dev_warn(isp->dev, "can't find source, failing now\n");
907 return ret;
908 }
909
910 if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
911 return 0;
912
913 pipe->external = media_entity_to_v4l2_subdev(source);
914
915 fmt.pad = source_pad->index;
916 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
917 ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
918 pad, get_fmt, NULL, &fmt);
919 if (unlikely(ret < 0)) {
920 dev_warn(isp->dev, "get_fmt returned null!\n");
921 return ret;
922 }
923
924 pipe->external_bpp = omap3isp_video_format_info(fmt.format.code)->bpp;
925
926 memset(&ctrls, 0, sizeof(ctrls));
927 memset(&ctrl, 0, sizeof(ctrl));
928
929 ctrl.id = V4L2_CID_PIXEL_RATE;
930
931 ctrls.count = 1;
932 ctrls.controls = &ctrl;
933
934 ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
935 if (ret < 0) {
936 dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
937 pipe->external->name);
938 return ret;
939 }
940
941 pipe->external_rate = ctrl.value64;
942
943 if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id)) {
944 unsigned int rate = UINT_MAX;
945 /*
946 * Check that maximum allowed CCDC pixel rate isn't
947 * exceeded by the pixel rate.
948 */
949 omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
950 if (pipe->external_rate > rate)
951 return -ENOSPC;
952 }
953
954 return 0;
955}
956
957/*
958 * Stream management
959 *
960 * Every ISP pipeline has a single input and a single output. The input can be
961 * either a sensor or a video node. The output is always a video node.
962 *
963 * As every pipeline has an output video node, the ISP video object at the
964 * pipeline output stores the pipeline state. It tracks the streaming state of
965 * both the input and output, as well as the availability of buffers.
966 *
967 * In sensor-to-memory mode, frames are always available at the pipeline input.
968 * Starting the sensor usually requires I2C transfers and must be done in
969 * interruptible context. The pipeline is started and stopped synchronously
970 * to the stream on/off commands. All modules in the pipeline will get their
971 * subdev set stream handler called. The module at the end of the pipeline must
972 * delay starting the hardware until buffers are available at its output.
973 *
974 * In memory-to-memory mode, starting/stopping the stream requires
975 * synchronization between the input and output. ISP modules can't be stopped
976 * in the middle of a frame, and at least some of the modules seem to become
977 * busy as soon as they're started, even if they don't receive a frame start
978 * event. For that reason frames need to be processed in single-shot mode. The
979 * driver needs to wait until a frame is completely processed and written to
980 * memory before restarting the pipeline for the next frame. Pipelined
981 * processing might be possible but requires more testing.
982 *
983 * Stream start must be delayed until buffers are available at both the input
984 * and output. The pipeline must be started in the videobuf queue callback with
985 * the buffer queue spinlock held. The modules' subdev set stream operation must
986 * not sleep.
987 */
988static int
989isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
990{
991 struct isp_video_fh *vfh = to_isp_video_fh(fh);
992 struct isp_video *video = video_drvdata(file);
993 enum isp_pipeline_state state;
994 struct isp_pipeline *pipe;
995 unsigned long flags;
996 int ret;
997
998 if (type != video->type)
999 return -EINVAL;
1000
1001 mutex_lock(&video->stream_lock);
1002
1003 if (video->streaming) {
1004 mutex_unlock(&video->stream_lock);
1005 return -EBUSY;
1006 }
1007
1008 /* Start streaming on the pipeline. No link touching an entity in the
1009 * pipeline can be activated or deactivated once streaming is started.
1010 */
1011 pipe = video->video.entity.pipe
1012 ? to_isp_pipeline(&video->video.entity) : &video->pipe;
1013
1014 pipe->entities = 0;
1015
1016 if (video->isp->pdata->set_constraints)
1017 video->isp->pdata->set_constraints(video->isp, true);
1018 pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
1019 pipe->max_rate = pipe->l3_ick;
1020
1021 ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
1022 if (ret < 0)
1023 goto err_pipeline_start;
1024
1025 /* Verify that the currently configured format matches the output of
1026 * the connected subdev.
1027 */
1028 ret = isp_video_check_format(video, vfh);
1029 if (ret < 0)
1030 goto err_check_format;
1031
1032 video->bpl_padding = ret;
1033 video->bpl_value = vfh->format.fmt.pix.bytesperline;
1034
1035 ret = isp_video_get_graph_data(video, pipe);
1036 if (ret < 0)
1037 goto err_check_format;
1038
1039 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1040 state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
1041 else
1042 state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
1043
1044 ret = isp_video_check_external_subdevs(video, pipe);
1045 if (ret < 0)
1046 goto err_check_format;
1047
1048 /* Validate the pipeline and update its state. */
1049 ret = isp_video_validate_pipeline(pipe);
1050 if (ret < 0)
1051 goto err_check_format;
1052
1053 pipe->error = false;
1054
1055 spin_lock_irqsave(&pipe->lock, flags);
1056 pipe->state &= ~ISP_PIPELINE_STREAM;
1057 pipe->state |= state;
1058 spin_unlock_irqrestore(&pipe->lock, flags);
1059
1060 /* Set the maximum time per frame as the value requested by userspace.
1061 * This is a soft limit that can be overridden if the hardware doesn't
1062 * support the requested limit.
1063 */
1064 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1065 pipe->max_timeperframe = vfh->timeperframe;
1066
1067 video->queue = &vfh->queue;
1068 INIT_LIST_HEAD(&video->dmaqueue);
1069 atomic_set(&pipe->frame_number, -1);
1070
1071 ret = omap3isp_video_queue_streamon(&vfh->queue);
1072 if (ret < 0)
1073 goto err_check_format;
1074
1075 /* In sensor-to-memory mode, the stream can be started synchronously
1076 * to the stream on command. In memory-to-memory mode, it will be
1077 * started when buffers are queued on both the input and output.
1078 */
1079 if (pipe->input == NULL) {
1080 ret = omap3isp_pipeline_set_stream(pipe,
1081 ISP_PIPELINE_STREAM_CONTINUOUS);
1082 if (ret < 0)
1083 goto err_set_stream;
1084 spin_lock_irqsave(&video->queue->irqlock, flags);
1085 if (list_empty(&video->dmaqueue))
1086 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
1087 spin_unlock_irqrestore(&video->queue->irqlock, flags);
1088 }
1089
1090 video->streaming = 1;
1091
1092 mutex_unlock(&video->stream_lock);
1093 return 0;
1094
1095err_set_stream:
1096 omap3isp_video_queue_streamoff(&vfh->queue);
1097err_check_format:
1098 media_entity_pipeline_stop(&video->video.entity);
1099err_pipeline_start:
1100 if (video->isp->pdata->set_constraints)
1101 video->isp->pdata->set_constraints(video->isp, false);
1102 /* The DMA queue must be emptied here, otherwise CCDC interrupts that
1103 * will get triggered the next time the CCDC is powered up will try to
1104 * access buffers that might have been freed but still present in the
1105 * DMA queue. This can easily get triggered if the above
1106 * omap3isp_pipeline_set_stream() call fails on a system with a
1107 * free-running sensor.
1108 */
1109 INIT_LIST_HEAD(&video->dmaqueue);
1110 video->queue = NULL;
1111
1112 mutex_unlock(&video->stream_lock);
1113 return ret;
1114}
1115
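/*
 * isp_video_streamoff - Stop streaming on a video node
 *
 * Update the pipeline state, stop the hardware, release the buffer queue and
 * the pipeline, and drop the platform constraints. The function is a no-op if
 * the file handle isn't streaming.
 */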
1116static int
1117isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
1118{
1119 struct isp_video_fh *vfh = to_isp_video_fh(fh);
1120 struct isp_video *video = video_drvdata(file);
1121 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
1122 enum isp_pipeline_state state;
1123 unsigned int streaming;
1124 unsigned long flags;
1125
1126 if (type != video->type)
1127 return -EINVAL;
1128
1129 mutex_lock(&video->stream_lock);
1130
1131 /* Make sure we're not streaming yet. */
1132 mutex_lock(&vfh->queue.lock);
1133 streaming = vfh->queue.streaming;
1134 mutex_unlock(&vfh->queue.lock);
1135
1136 if (!streaming)
1137 goto done;
1138
1139 /* Update the pipeline state. */
1140 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1141 state = ISP_PIPELINE_STREAM_OUTPUT
1142 | ISP_PIPELINE_QUEUE_OUTPUT;
1143 else
1144 state = ISP_PIPELINE_STREAM_INPUT
1145 | ISP_PIPELINE_QUEUE_INPUT;
1146
1147 spin_lock_irqsave(&pipe->lock, flags);
1148 pipe->state &= ~state;
1149 spin_unlock_irqrestore(&pipe->lock, flags);
1150
1151 /* Stop the stream. */
1152 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
1153 omap3isp_video_queue_streamoff(&vfh->queue);
1154 video->queue = NULL;
1155 video->streaming = 0;
1156
1157 if (video->isp->pdata->set_constraints)
1158 video->isp->pdata->set_constraints(video->isp, false);
1159 media_entity_pipeline_stop(&video->video.entity);
1160
1161done:
1162 mutex_unlock(&video->stream_lock);
1163 return 0;
1164}
1165
1166static int
1167isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1168{
1169 if (input->index > 0)
1170 return -EINVAL;
1171
1172 strlcpy(input->name, "camera", sizeof(input->name));
1173 input->type = V4L2_INPUT_TYPE_CAMERA;
1174
1175 return 0;
1176}
1177
1178static int
1179isp_video_g_input(struct file *file, void *fh, unsigned int *input)
1180{
1181 *input = 0;
1182
1183 return 0;
1184}
1185
1186static int
1187isp_video_s_input(struct file *file, void *fh, unsigned int input)
1188{
1189 return input == 0 ? 0 : -EINVAL;
1190}
1191
1192static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
1193 .vidioc_querycap = isp_video_querycap,
1194 .vidioc_g_fmt_vid_cap = isp_video_get_format,
1195 .vidioc_s_fmt_vid_cap = isp_video_set_format,
1196 .vidioc_try_fmt_vid_cap = isp_video_try_format,
1197 .vidioc_g_fmt_vid_out = isp_video_get_format,
1198 .vidioc_s_fmt_vid_out = isp_video_set_format,
1199 .vidioc_try_fmt_vid_out = isp_video_try_format,
1200 .vidioc_cropcap = isp_video_cropcap,
1201 .vidioc_g_crop = isp_video_get_crop,
1202 .vidioc_s_crop = isp_video_set_crop,
1203 .vidioc_g_parm = isp_video_get_param,
1204 .vidioc_s_parm = isp_video_set_param,
1205 .vidioc_reqbufs = isp_video_reqbufs,
1206 .vidioc_querybuf = isp_video_querybuf,
1207 .vidioc_qbuf = isp_video_qbuf,
1208 .vidioc_dqbuf = isp_video_dqbuf,
1209 .vidioc_streamon = isp_video_streamon,
1210 .vidioc_streamoff = isp_video_streamoff,
1211 .vidioc_enum_input = isp_video_enum_input,
1212 .vidioc_g_input = isp_video_g_input,
1213 .vidioc_s_input = isp_video_s_input,
1214};
1215
1216/* -----------------------------------------------------------------------------
1217 * V4L2 file operations
1218 */
1219
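/*
 * isp_video_open - Open a video device node
 *
 * Allocate a file handle, power up the ISP if this is the first user, enable
 * power management on the pipeline, and initialize the buffer queue and the
 * default format.
 */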
1220static int isp_video_open(struct file *file)
1221{
1222 struct isp_video *video = video_drvdata(file);
1223 struct isp_video_fh *handle;
1224 int ret = 0;
1225
1226 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1227 if (handle == NULL)
1228 return -ENOMEM;
1229
1230 v4l2_fh_init(&handle->vfh, &video->video);
1231 v4l2_fh_add(&handle->vfh);
1232
1233 /* If this is the first user, initialise the pipeline. */
1234 if (omap3isp_get(video->isp) == NULL) {
1235 ret = -EBUSY;
1236 goto done;
1237 }
1238
1239 ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
1240 if (ret < 0) {
1241 omap3isp_put(video->isp);
1242 goto done;
1243 }
1244
1245 omap3isp_video_queue_init(&handle->queue, video->type,
1246 &isp_video_queue_ops, video->isp->dev,
1247 sizeof(struct isp_buffer));
1248
1249 memset(&handle->format, 0, sizeof(handle->format));
1250 handle->format.type = video->type;
1251 handle->timeperframe.denominator = 1;
1252
1253 handle->video = video;
1254 file->private_data = &handle->vfh;
1255
1256done:
1257 if (ret < 0) {
1258 v4l2_fh_del(&handle->vfh);
1259 kfree(handle);
1260 }
1261
1262 return ret;
1263}
1264
1265static int isp_video_release(struct file *file)
1266{
1267 struct isp_video *video = video_drvdata(file);
1268 struct v4l2_fh *vfh = file->private_data;
1269 struct isp_video_fh *handle = to_isp_video_fh(vfh);
1270
1271 /* Disable streaming and free the buffers queue resources. */
1272 isp_video_streamoff(file, vfh, video->type);
1273
1274 mutex_lock(&handle->queue.lock);
1275 omap3isp_video_queue_cleanup(&handle->queue);
1276 mutex_unlock(&handle->queue.lock);
1277
1278 omap3isp_pipeline_pm_use(&video->video.entity, 0);
1279
1280 /* Release the file handle. */
1281 v4l2_fh_del(vfh);
1282 kfree(handle);
1283 file->private_data = NULL;
1284
1285 omap3isp_put(video->isp);
1286
1287 return 0;
1288}
1289
1290static unsigned int isp_video_poll(struct file *file, poll_table *wait)
1291{
1292 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1293 struct isp_video_queue *queue = &vfh->queue;
1294
1295 return omap3isp_video_queue_poll(queue, file, wait);
1296}
1297
1298static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
1299{
1300 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1301
1302 return omap3isp_video_queue_mmap(&vfh->queue, vma);
1303}
1304
1305static struct v4l2_file_operations isp_video_fops = {
1306 .owner = THIS_MODULE,
1307 .unlocked_ioctl = video_ioctl2,
1308 .open = isp_video_open,
1309 .release = isp_video_release,
1310 .poll = isp_video_poll,
1311 .mmap = isp_video_mmap,
1312};
1313
1314/* -----------------------------------------------------------------------------
1315 * ISP video core
1316 */
1317
1318static const struct isp_video_operations isp_video_dummy_ops = {
1319};
1320
1321int omap3isp_video_init(struct isp_video *video, const char *name)
1322{
1323 const char *direction;
1324 int ret;
1325
1326 switch (video->type) {
1327 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
1328 direction = "output";
1329 video->pad.flags = MEDIA_PAD_FL_SINK;
1330 break;
1331 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1332 direction = "input";
1333 video->pad.flags = MEDIA_PAD_FL_SOURCE;
1334 break;
1335
1336 default:
1337 return -EINVAL;
1338 }
1339
1340 ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
1341 if (ret < 0)
1342 return ret;
1343
1344 mutex_init(&video->mutex);
1345 atomic_set(&video->active, 0);
1346
1347 spin_lock_init(&video->pipe.lock);
1348 mutex_init(&video->stream_lock);
1349
1350 /* Initialize the video device. */
1351 if (video->ops == NULL)
1352 video->ops = &isp_video_dummy_ops;
1353
1354 video->video.fops = &isp_video_fops;
1355 snprintf(video->video.name, sizeof(video->video.name),
1356 "OMAP3 ISP %s %s", name, direction);
1357 video->video.vfl_type = VFL_TYPE_GRABBER;
1358 video->video.release = video_device_release_empty;
1359 video->video.ioctl_ops = &isp_video_ioctl_ops;
1360 video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;
1361
1362 video_set_drvdata(&video->video, video);
1363
1364 return 0;
1365}
1366
1367void omap3isp_video_cleanup(struct isp_video *video)
1368{
1369 media_entity_cleanup(&video->video.entity);
1370 mutex_destroy(&video->stream_lock);
1371 mutex_destroy(&video->mutex);
1372}
1373
1374int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
1375{
1376 int ret;
1377
1378 video->video.v4l2_dev = vdev;
1379
1380 ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
1381 if (ret < 0)
1382 printk(KERN_ERR "%s: could not register video device (%d)\n",
1383 __func__, ret);
1384
1385 return ret;
1386}
1387
1388void omap3isp_video_unregister(struct isp_video *video)
1389{
1390 if (video_is_registered(&video->video))
1391 video_unregister_device(&video->video);
1392}