drivers/media/video/omap3isp/ispvideo.c
1/*
2 * ispvideo.c
3 *
4 * TI OMAP3 ISP - Generic video node
5 *
6 * Copyright (C) 2009-2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <asm/cacheflush.h>
27#include <linux/clk.h>
28#include <linux/mm.h>
29#include <linux/pagemap.h>
30#include <linux/scatterlist.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <linux/vmalloc.h>
34#include <media/v4l2-dev.h>
35#include <media/v4l2-ioctl.h>
36#include <plat/iommu.h>
37#include <plat/iovmm.h>
38#include <plat/omap-pm.h>
39
40#include "ispvideo.h"
41#include "isp.h"
42
43
44/* -----------------------------------------------------------------------------
45 * Helper functions
46 */
47
48static struct isp_format_info formats[] = {
49 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
50 V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
51 V4L2_PIX_FMT_GREY, 8, },
52	{ V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
53 V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
54 V4L2_PIX_FMT_Y10, 10, },
55	{ V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
56 V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
57 V4L2_PIX_FMT_Y12, 12, },
58	{ V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
59 V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
60 V4L2_PIX_FMT_SBGGR8, 8, },
61	{ V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
62 V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
63 V4L2_PIX_FMT_SGBRG8, 8, },
64	{ V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
65 V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
66 V4L2_PIX_FMT_SGRBG8, 8, },
67	{ V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
68 V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
69 V4L2_PIX_FMT_SRGGB8, 8, },
70	{ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
71 V4L2_MBUS_FMT_SGRBG10_1X10, 0,
72 V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
73	{ V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
74 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
75 V4L2_PIX_FMT_SBGGR10, 10, },
76	{ V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
77 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
78 V4L2_PIX_FMT_SGBRG10, 10, },
79	{ V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
80 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
81 V4L2_PIX_FMT_SGRBG10, 10, },
82	{ V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
83 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
84 V4L2_PIX_FMT_SRGGB10, 10, },
85	{ V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
86 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
87 V4L2_PIX_FMT_SBGGR12, 12, },
88	{ V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
89 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
90 V4L2_PIX_FMT_SGBRG12, 12, },
91	{ V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
92 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
93 V4L2_PIX_FMT_SGRBG12, 12, },
94	{ V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
95 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
96 V4L2_PIX_FMT_SRGGB12, 12, },
97	{ V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
98 V4L2_MBUS_FMT_UYVY8_1X16, 0,
99 V4L2_PIX_FMT_UYVY, 16, },
100	{ V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
101 V4L2_MBUS_FMT_YUYV8_1X16, 0,
102 V4L2_PIX_FMT_YUYV, 16, },
103};
104
105const struct isp_format_info *
106omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
107{
108 unsigned int i;
109
110 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
111 if (formats[i].code == code)
112 return &formats[i];
113 }
114
115 return NULL;
116}
117
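As a quick illustration of the lookup above, a hypothetical caller (not part of the driver; the helper name and chosen bus code are only examples) could map a media bus code to its memory pixel format like this:

static u32 example_pixelformat_for(enum v4l2_mbus_pixelcode code)
{
	const struct isp_format_info *info = omap3isp_video_format_info(code);

	/* NULL means the ISP doesn't know how to store this bus code in memory. */
	if (info == NULL)
		return 0;

	/* For V4L2_MBUS_FMT_SGRBG10_1X10 this is V4L2_PIX_FMT_SGRBG10. */
	return info->pixelformat;
}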
118/*
119 * Decide whether desired output pixel code can be obtained with
120 * the lane shifter by shifting the input pixel code.
121 * @in: input pixelcode to shifter
122 * @out: output pixelcode from shifter
123 * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
124 *
125 * return true if the combination is possible
126 * return false otherwise
127 */
128static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in,
129 enum v4l2_mbus_pixelcode out,
130 unsigned int additional_shift)
131{
132 const struct isp_format_info *in_info, *out_info;
133
134 if (in == out)
135 return true;
136
137 in_info = omap3isp_video_format_info(in);
138 out_info = omap3isp_video_format_info(out);
139
140 if ((in_info->flavor == 0) || (out_info->flavor == 0))
141 return false;
142
143 if (in_info->flavor != out_info->flavor)
144 return false;
145
146 return in_info->bpp - out_info->bpp + additional_shift <= 6;
147}
148
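A worked example of the rule above (a sketch, not driver code): shifting 12-bit GRBG Bayer down to the 8-bit GRBG code is possible because both formats[] entries share the same non-zero flavor and 12 - 8 + 0 <= 6, while the packed YUV codes have a zero flavor and can never be produced by the lane shifter.

static void example_shifter_checks(void)
{
	/* Same Bayer flavor, 4-bit difference, no extra wiring shift: allowed. */
	WARN_ON(!isp_video_is_shiftable(V4L2_MBUS_FMT_SGRBG12_1X12,
					V4L2_MBUS_FMT_SGRBG8_1X8, 0));

	/* YUV entries have flavor 0 in formats[]: never shiftable. */
	WARN_ON(isp_video_is_shiftable(V4L2_MBUS_FMT_YUYV8_1X16,
				       V4L2_MBUS_FMT_UYVY8_1X16, 0));
}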
149/*
150 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
151 * @video: ISP video instance
152 * @mbus: v4l2_mbus_framefmt format (input)
153 * @pix: v4l2_pix_format format (output)
154 *
155 * Fill the output pix structure with information from the input mbus format.
156 * The bytesperline and sizeimage fields are computed from the requested bytes
157 * per line value in the pix format and information from the video instance.
158 *
159 * Return the number of padding bytes at end of line.
160 */
161static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
162 const struct v4l2_mbus_framefmt *mbus,
163 struct v4l2_pix_format *pix)
164{
165 unsigned int bpl = pix->bytesperline;
166 unsigned int min_bpl;
167 unsigned int i;
168
169 memset(pix, 0, sizeof(*pix));
170 pix->width = mbus->width;
171 pix->height = mbus->height;
172
173 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
174 if (formats[i].code == mbus->code)
175 break;
176 }
177
178 if (WARN_ON(i == ARRAY_SIZE(formats)))
179 return 0;
180
181 min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;
182
183 /* Clamp the requested bytes per line value. If the maximum bytes per
184 * line value is zero, the module doesn't support user configurable line
185 * sizes. Override the requested value with the minimum in that case.
186 */
187 if (video->bpl_max)
188 bpl = clamp(bpl, min_bpl, video->bpl_max);
189 else
190 bpl = min_bpl;
191
192 if (!video->bpl_zero_padding || bpl != min_bpl)
193 bpl = ALIGN(bpl, video->bpl_alignment);
194
195 pix->pixelformat = formats[i].pixelformat;
196 pix->bytesperline = bpl;
197 pix->sizeimage = pix->bytesperline * pix->height;
198 pix->colorspace = mbus->colorspace;
199 pix->field = mbus->field;
200
201 return bpl - min_bpl;
202}
203
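For illustration (the numbers are only examples), the computation above works out as follows for a 2592-pixel-wide 10-bit Bayer frame on a module with 32-byte line alignment: min_bpl is 2592 * ALIGN(10, 8) / 8 = 5184 bytes, and a requested bytesperline of 5200 is aligned up to 5216, leaving 32 bytes of end-of-line padding.

/* Illustration only: mirrors the bytes-per-line math in isp_video_mbus_to_pix(). */
static unsigned int example_bpl_padding(void)
{
	unsigned int min_bpl = 2592 * ALIGN(10, 8) / 8;	/* 2592 * 16 / 8 = 5184 */
	unsigned int bpl = ALIGN(5200, 32);		/* 5216, assuming bpl_alignment == 32 */

	return bpl - min_bpl;				/* 32 bytes of padding per line */
}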
204static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
205 struct v4l2_mbus_framefmt *mbus)
206{
207 unsigned int i;
208
209 memset(mbus, 0, sizeof(*mbus));
210 mbus->width = pix->width;
211 mbus->height = pix->height;
212
213 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
214 if (formats[i].pixelformat == pix->pixelformat)
215 break;
216 }
217
218 if (WARN_ON(i == ARRAY_SIZE(formats)))
219 return;
220
221 mbus->code = formats[i].code;
222 mbus->colorspace = pix->colorspace;
223 mbus->field = pix->field;
224}
225
226static struct v4l2_subdev *
227isp_video_remote_subdev(struct isp_video *video, u32 *pad)
228{
229 struct media_pad *remote;
230
231 remote = media_entity_remote_source(&video->pad);
232
233 if (remote == NULL ||
234 media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
235 return NULL;
236
237 if (pad)
238 *pad = remote->index;
239
240 return media_entity_to_v4l2_subdev(remote->entity);
241}
242
243/* Return a pointer to the ISP video instance at the far end of the pipeline. */
244static struct isp_video *
245isp_video_far_end(struct isp_video *video)
246{
247 struct media_entity_graph graph;
248 struct media_entity *entity = &video->video.entity;
249 struct media_device *mdev = entity->parent;
250 struct isp_video *far_end = NULL;
251
252 mutex_lock(&mdev->graph_mutex);
253 media_entity_graph_walk_start(&graph, entity);
254
255 while ((entity = media_entity_graph_walk_next(&graph))) {
256 if (entity == &video->video.entity)
257 continue;
258
259 if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
260 continue;
261
262 far_end = to_isp_video(media_entity_to_video_device(entity));
263 if (far_end->type != video->type)
264 break;
265
266 far_end = NULL;
267 }
268
269 mutex_unlock(&mdev->graph_mutex);
270 return far_end;
271}
272
273/*
274 * Validate a pipeline by checking both ends of all links for format
275 * discrepancies.
276 *
277 * Compute the minimum time per frame value as the maximum of time per frame
278 * limits reported by every block in the pipeline.
279 *
280 * Return 0 if all formats match, or -EPIPE if at least one link is found with
281 * different formats on its two ends or if the pipeline doesn't start with a
282 * video source (either a subdev with no input pad, or a non-subdev entity).
283 */
284static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
285{
286 struct isp_device *isp = pipe->output->isp;
287 struct v4l2_subdev_format fmt_source;
288 struct v4l2_subdev_format fmt_sink;
289 struct media_pad *pad;
290 struct v4l2_subdev *subdev;
291 int ret;
292
293 pipe->max_rate = pipe->l3_ick;
294
295 subdev = isp_video_remote_subdev(pipe->output, NULL);
296 if (subdev == NULL)
297 return -EPIPE;
298
299 while (1) {
300		unsigned int shifter_link;
301 /* Retrieve the sink format */
302 pad = &subdev->entity.pads[0];
303 if (!(pad->flags & MEDIA_PAD_FL_SINK))
304 break;
305
306 fmt_sink.pad = pad->index;
307 fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
308 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
309 if (ret < 0 && ret != -ENOIOCTLCMD)
310 return -EPIPE;
311
312 /* Update the maximum frame rate */
313 if (subdev == &isp->isp_res.subdev)
314 omap3isp_resizer_max_rate(&isp->isp_res,
315 &pipe->max_rate);
316
317 /* Check ccdc maximum data rate when data comes from sensor
318 * TODO: Include ccdc rate in pipe->max_rate and compare the
319 * total pipe rate with the input data rate from sensor.
320 */
321 if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) {
322 unsigned int rate = UINT_MAX;
323
324 omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
325 if (isp->isp_ccdc.vpcfg.pixelclk > rate)
326 return -ENOSPC;
327 }
328
329 /* If sink pad is on CCDC, the link has the lane shifter
330 * in the middle of it. */
331 shifter_link = subdev == &isp->isp_ccdc.subdev;
332
333 /* Retrieve the source format. Return an error if no source
334 * entity can be found, and stop checking the pipeline if the
335 * source entity isn't a subdev.
336 */
337		pad = media_entity_remote_source(pad);
338 if (pad == NULL)
339 return -EPIPE;
340
341 if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
342 break;
343
344 subdev = media_entity_to_v4l2_subdev(pad->entity);
345
346 fmt_source.pad = pad->index;
347 fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
348 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
349 if (ret < 0 && ret != -ENOIOCTLCMD)
350 return -EPIPE;
351
352 /* Check if the two ends match */
353		if (fmt_source.format.width != fmt_sink.format.width ||
354 fmt_source.format.height != fmt_sink.format.height)
355 return -EPIPE;
356
357 if (shifter_link) {
358 unsigned int parallel_shift = 0;
359 if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) {
360 struct isp_parallel_platform_data *pdata =
361 &((struct isp_v4l2_subdevs_group *)
362 subdev->host_priv)->bus.parallel;
363 parallel_shift = pdata->data_lane_shift * 2;
364 }
365 if (!isp_video_is_shiftable(fmt_source.format.code,
366 fmt_sink.format.code,
367 parallel_shift))
368 return -EPIPE;
369 } else if (fmt_source.format.code != fmt_sink.format.code)
370 return -EPIPE;
371 }
372
373 return 0;
374}
375
376static int
377__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
378{
379 struct v4l2_subdev_format fmt;
380 struct v4l2_subdev *subdev;
381 u32 pad;
382 int ret;
383
384 subdev = isp_video_remote_subdev(video, &pad);
385 if (subdev == NULL)
386 return -EINVAL;
387
388 mutex_lock(&video->mutex);
389
390 fmt.pad = pad;
391 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
392 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
393 if (ret == -ENOIOCTLCMD)
394 ret = -EINVAL;
395
396 mutex_unlock(&video->mutex);
397
398 if (ret)
399 return ret;
400
401 format->type = video->type;
402 return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
403}
404
405static int
406isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
407{
408 struct v4l2_format format;
409 int ret;
410
411 memcpy(&format, &vfh->format, sizeof(format));
412 ret = __isp_video_get_format(video, &format);
413 if (ret < 0)
414 return ret;
415
416 if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
417 vfh->format.fmt.pix.height != format.fmt.pix.height ||
418 vfh->format.fmt.pix.width != format.fmt.pix.width ||
419 vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
420 vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
421 return -EINVAL;
422
423 return ret;
424}
425
426/* -----------------------------------------------------------------------------
427 * IOMMU management
428 */
429
430#define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
431
432/*
433 * ispmmu_vmap - Wrapper for Virtual memory mapping of a scatter gather list
434 * @isp: Pointer to the OMAP3 ISP device.
435 * @sglist: Pointer to the source scatter gather list to map.
436 * @sglen: Number of elements in the scatter-gather list.
437 *
438 * Returns the device address mapped by the ISP MMU, or -ENOMEM if we ran
439 * out of memory.
440 */
441static dma_addr_t
442ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
443{
444 struct sg_table *sgt;
445 u32 da;
446
447 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
448 if (sgt == NULL)
449 return -ENOMEM;
450
451 sgt->sgl = (struct scatterlist *)sglist;
452 sgt->nents = sglen;
453 sgt->orig_nents = sglen;
454
455	da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
456 if (IS_ERR_VALUE(da))
457 kfree(sgt);
458
459 return da;
460}
461
462/*
463 * ispmmu_vunmap - Unmap a device address from the ISP MMU
464 * @isp: Pointer to the OMAP3 ISP device.
465 * @da: Device address generated by an ispmmu_vmap call.
466 */
467static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
468{
469 struct sg_table *sgt;
470
471	sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
472 kfree(sgt);
473}
474
475/* -----------------------------------------------------------------------------
476 * Video queue operations
477 */
478
479static void isp_video_queue_prepare(struct isp_video_queue *queue,
480 unsigned int *nbuffers, unsigned int *size)
481{
482 struct isp_video_fh *vfh =
483 container_of(queue, struct isp_video_fh, queue);
484 struct isp_video *video = vfh->video;
485
486 *size = vfh->format.fmt.pix.sizeimage;
487 if (*size == 0)
488 return;
489
490 *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
491}
492
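A worked example of the clamp above, with assumed numbers (the 32 MiB of capture memory is an assumption, not a driver default): for a 2592x1944 capture with a 5184-byte bytesperline, sizeimage is 10,077,696 bytes, PAGE_ALIGN() rounds it to 10,080,256, and 33,554,432 / 10,080,256 = 3, so a request for 8 buffers would be trimmed to 3.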
493static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
494{
495 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
496 struct isp_buffer *buffer = to_isp_buffer(buf);
497 struct isp_video *video = vfh->video;
498
499 if (buffer->isp_addr) {
500 ispmmu_vunmap(video->isp, buffer->isp_addr);
501 buffer->isp_addr = 0;
502 }
503}
504
505static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
506{
507 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
508 struct isp_buffer *buffer = to_isp_buffer(buf);
509 struct isp_video *video = vfh->video;
510 unsigned long addr;
511
512 addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
513 if (IS_ERR_VALUE(addr))
514 return -EIO;
515
516 if (!IS_ALIGNED(addr, 32)) {
517		dev_dbg(video->isp->dev, "Buffer address must be "
518			"aligned to a 32-byte boundary.\n");
519 ispmmu_vunmap(video->isp, buffer->isp_addr);
520 return -EINVAL;
521 }
522
523 buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
524 buffer->isp_addr = addr;
525 return 0;
526}
527
528/*
529 * isp_video_buffer_queue - Add buffer to streaming queue
530 * @buf: Video buffer
531 *
532 * In memory-to-memory mode, start streaming on the pipeline if buffers are
533 * queued on both the input and the output, provided the pipeline isn't already busy.
534 * If the pipeline is busy, it will be restarted in the output module interrupt
535 * handler.
536 */
537static void isp_video_buffer_queue(struct isp_video_buffer *buf)
538{
539 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
540 struct isp_buffer *buffer = to_isp_buffer(buf);
541 struct isp_video *video = vfh->video;
542 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
543 enum isp_pipeline_state state;
544 unsigned long flags;
545 unsigned int empty;
546 unsigned int start;
547
548 empty = list_empty(&video->dmaqueue);
549 list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
550
551 if (empty) {
552 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
553 state = ISP_PIPELINE_QUEUE_OUTPUT;
554 else
555 state = ISP_PIPELINE_QUEUE_INPUT;
556
557 spin_lock_irqsave(&pipe->lock, flags);
558 pipe->state |= state;
559 video->ops->queue(video, buffer);
560 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
561
562 start = isp_pipeline_ready(pipe);
563 if (start)
564 pipe->state |= ISP_PIPELINE_STREAM;
565 spin_unlock_irqrestore(&pipe->lock, flags);
566
567 if (start)
568 omap3isp_pipeline_set_stream(pipe,
569 ISP_PIPELINE_STREAM_SINGLESHOT);
570 }
571}
572
573static const struct isp_video_queue_operations isp_video_queue_ops = {
574 .queue_prepare = &isp_video_queue_prepare,
575 .buffer_prepare = &isp_video_buffer_prepare,
576 .buffer_queue = &isp_video_buffer_queue,
577 .buffer_cleanup = &isp_video_buffer_cleanup,
578};
579
580/*
581 * omap3isp_video_buffer_next - Complete the current buffer and return the next
582 * @video: ISP video object
583 * @error: Whether an error occurred during capture
584 *
585 * Remove the current video buffer from the DMA queue and fill its timestamp,
586 * field count and state fields before waking up its completion handler.
587 *
588 * The buffer state is set to VIDEOBUF_DONE if no error occurred (@error is 0)
589 * or VIDEOBUF_ERROR otherwise (@error is non-zero).
590 *
591 * The DMA queue is expected to contain at least one buffer.
592 *
593 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
594 * empty.
595 */
596struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video,
597 unsigned int error)
598{
599 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
600 struct isp_video_queue *queue = video->queue;
601 enum isp_pipeline_state state;
602 struct isp_video_buffer *buf;
603 unsigned long flags;
604 struct timespec ts;
605
606 spin_lock_irqsave(&queue->irqlock, flags);
607 if (WARN_ON(list_empty(&video->dmaqueue))) {
608 spin_unlock_irqrestore(&queue->irqlock, flags);
609 return NULL;
610 }
611
612 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
613 irqlist);
614 list_del(&buf->irqlist);
615 spin_unlock_irqrestore(&queue->irqlock, flags);
616
617 ktime_get_ts(&ts);
618 buf->vbuf.timestamp.tv_sec = ts.tv_sec;
619 buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
620
621 /* Do frame number propagation only if this is the output video node.
622 * Frame number either comes from the CSI receivers or it gets
623 * incremented here if H3A is not active.
624 * Note: There is no guarantee that the output buffer will finish
625 * first, so the input number might lag behind by 1 in some cases.
626 */
627 if (video == pipe->output && !pipe->do_propagation)
628 buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
629 else
630 buf->vbuf.sequence = atomic_read(&pipe->frame_number);
631
632 buf->state = error ? ISP_BUF_STATE_ERROR : ISP_BUF_STATE_DONE;
633
634 wake_up(&buf->wait);
635
636 if (list_empty(&video->dmaqueue)) {
637 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
638 state = ISP_PIPELINE_QUEUE_OUTPUT
639 | ISP_PIPELINE_STREAM;
640 else
641 state = ISP_PIPELINE_QUEUE_INPUT
642 | ISP_PIPELINE_STREAM;
643
644 spin_lock_irqsave(&pipe->lock, flags);
645 pipe->state &= ~state;
646 if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
647 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
648 spin_unlock_irqrestore(&pipe->lock, flags);
649 return NULL;
650 }
651
652 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
653 spin_lock_irqsave(&pipe->lock, flags);
654 pipe->state &= ~ISP_PIPELINE_STREAM;
655 spin_unlock_irqrestore(&pipe->lock, flags);
656 }
657
658 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
659 irqlist);
660 buf->state = ISP_BUF_STATE_ACTIVE;
661 return to_isp_buffer(buf);
662}
663
664/*
665 * omap3isp_video_resume - Perform resume operation on the buffers
666 * @video: ISP video object
667 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
668 *
669 * This function is intended to be used in suspend/resume scenarios. It
670 * requests the video queue layer to discard buffers marked as DONE if the
671 * pipeline is in continuous mode, and requests the ISP modules to requeue
672 * the ACTIVE buffer if there is one.
673 */
674void omap3isp_video_resume(struct isp_video *video, int continuous)
675{
676 struct isp_buffer *buf = NULL;
677
678 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
679 omap3isp_video_queue_discard_done(video->queue);
680
681 if (!list_empty(&video->dmaqueue)) {
682 buf = list_first_entry(&video->dmaqueue,
683 struct isp_buffer, buffer.irqlist);
684 video->ops->queue(video, buf);
685 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
686 } else {
687 if (continuous)
688 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
689 }
690}
691
692/* -----------------------------------------------------------------------------
693 * V4L2 ioctls
694 */
695
696static int
697isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
698{
699 struct isp_video *video = video_drvdata(file);
700
701 strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
702 strlcpy(cap->card, video->video.name, sizeof(cap->card));
703 strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
704
705 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
706 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
707 else
708 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
709
710 return 0;
711}
712
713static int
714isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
715{
716 struct isp_video_fh *vfh = to_isp_video_fh(fh);
717 struct isp_video *video = video_drvdata(file);
718
719 if (format->type != video->type)
720 return -EINVAL;
721
722 mutex_lock(&video->mutex);
723 *format = vfh->format;
724 mutex_unlock(&video->mutex);
725
726 return 0;
727}
728
729static int
730isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
731{
732 struct isp_video_fh *vfh = to_isp_video_fh(fh);
733 struct isp_video *video = video_drvdata(file);
734 struct v4l2_mbus_framefmt fmt;
735
736 if (format->type != video->type)
737 return -EINVAL;
738
739 mutex_lock(&video->mutex);
740
741 /* Fill the bytesperline and sizeimage fields by converting to media bus
742 * format and back to pixel format.
743 */
744 isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
745 isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
746
747 vfh->format = *format;
748
749 mutex_unlock(&video->mutex);
750 return 0;
751}
752
753static int
754isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
755{
756 struct isp_video *video = video_drvdata(file);
757 struct v4l2_subdev_format fmt;
758 struct v4l2_subdev *subdev;
759 u32 pad;
760 int ret;
761
762 if (format->type != video->type)
763 return -EINVAL;
764
765 subdev = isp_video_remote_subdev(video, &pad);
766 if (subdev == NULL)
767 return -EINVAL;
768
769 isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
770
771 fmt.pad = pad;
772 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
773 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
774 if (ret)
775 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
776
777 isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
778 return 0;
779}
780
781static int
782isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
783{
784 struct isp_video *video = video_drvdata(file);
785 struct v4l2_subdev *subdev;
786 int ret;
787
788 subdev = isp_video_remote_subdev(video, NULL);
789 if (subdev == NULL)
790 return -EINVAL;
791
792 mutex_lock(&video->mutex);
793 ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
794 mutex_unlock(&video->mutex);
795
796 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
797}
798
799static int
800isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
801{
802 struct isp_video *video = video_drvdata(file);
803 struct v4l2_subdev_format format;
804 struct v4l2_subdev *subdev;
805 u32 pad;
806 int ret;
807
808 subdev = isp_video_remote_subdev(video, &pad);
809 if (subdev == NULL)
810 return -EINVAL;
811
812 /* Try the get crop operation first and fallback to get format if not
813 * implemented.
814 */
815 ret = v4l2_subdev_call(subdev, video, g_crop, crop);
816 if (ret != -ENOIOCTLCMD)
817 return ret;
818
819 format.pad = pad;
820 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
821 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
822 if (ret < 0)
823 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
824
825 crop->c.left = 0;
826 crop->c.top = 0;
827 crop->c.width = format.format.width;
828 crop->c.height = format.format.height;
829
830 return 0;
831}
832
833static int
834isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
835{
836 struct isp_video *video = video_drvdata(file);
837 struct v4l2_subdev *subdev;
838 int ret;
839
840 subdev = isp_video_remote_subdev(video, NULL);
841 if (subdev == NULL)
842 return -EINVAL;
843
844 mutex_lock(&video->mutex);
845 ret = v4l2_subdev_call(subdev, video, s_crop, crop);
846 mutex_unlock(&video->mutex);
847
848 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
849}
850
851static int
852isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
853{
854 struct isp_video_fh *vfh = to_isp_video_fh(fh);
855 struct isp_video *video = video_drvdata(file);
856
857 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
858 video->type != a->type)
859 return -EINVAL;
860
861 memset(a, 0, sizeof(*a));
862 a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
863 a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
864 a->parm.output.timeperframe = vfh->timeperframe;
865
866 return 0;
867}
868
869static int
870isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
871{
872 struct isp_video_fh *vfh = to_isp_video_fh(fh);
873 struct isp_video *video = video_drvdata(file);
874
875 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
876 video->type != a->type)
877 return -EINVAL;
878
879 if (a->parm.output.timeperframe.denominator == 0)
880 a->parm.output.timeperframe.denominator = 1;
881
882 vfh->timeperframe = a->parm.output.timeperframe;
883
884 return 0;
885}
886
887static int
888isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
889{
890 struct isp_video_fh *vfh = to_isp_video_fh(fh);
891
892 return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
893}
894
895static int
896isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
897{
898 struct isp_video_fh *vfh = to_isp_video_fh(fh);
899
900 return omap3isp_video_queue_querybuf(&vfh->queue, b);
901}
902
903static int
904isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
905{
906 struct isp_video_fh *vfh = to_isp_video_fh(fh);
907
908 return omap3isp_video_queue_qbuf(&vfh->queue, b);
909}
910
911static int
912isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
913{
914 struct isp_video_fh *vfh = to_isp_video_fh(fh);
915
916 return omap3isp_video_queue_dqbuf(&vfh->queue, b,
917 file->f_flags & O_NONBLOCK);
918}
919
920/*
921 * Stream management
922 *
923 * Every ISP pipeline has a single input and a single output. The input can be
924 * either a sensor or a video node. The output is always a video node.
925 *
926 * As every pipeline has an output video node, the ISP video objects at the
927 * pipeline output stores the pipeline state. It tracks the streaming state of
928 * both the input and output, as well as the availability of buffers.
929 *
930 * In sensor-to-memory mode, frames are always available at the pipeline input.
931 * Starting the sensor usually requires I2C transfers and must be done in
932 * interruptible context. The pipeline is started and stopped synchronously
933 * to the stream on/off commands. All modules in the pipeline will get their
934 * subdev set stream handler called. The module at the end of the pipeline must
935 * delay starting the hardware until buffers are available at its output.
936 *
937 * In memory-to-memory mode, starting/stopping the stream requires
938 * synchronization between the input and output. ISP modules can't be stopped
939 * in the middle of a frame, and at least some of the modules seem to become
940 * busy as soon as they're started, even if they don't receive a frame start
941 * event. For that reason frames need to be processed in single-shot mode. The
942 * driver needs to wait until a frame is completely processed and written to
943 * memory before restarting the pipeline for the next frame. Pipelined
944 * processing might be possible but requires more testing.
945 *
946 * Stream start must be delayed until buffers are available at both the input
947 * and output. The pipeline must be started in the videobuf queue callback with
948 * the buffers queue spinlock held. The modules' subdev set stream operation must
949 * not sleep.
950 */
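The comment above describes the kernel side; from userspace the same sequence is driven through the standard V4L2 ioctls. A minimal, hedged capture sketch (the device path is an assumption, error checking trimmed to the open() call for brevity):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_requestbuffers req = { 0 };
	struct v4l2_buffer buf = { 0 };
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	void *mem;
	int fd;

	fd = open("/dev/video2", O_RDWR);	/* assumed: an OMAP3 ISP capture node */
	if (fd < 0)
		return 1;

	/* Request a single memory-mapped buffer and map it. */
	req.count = 1;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_REQBUFS, &req);

	buf.index = 0;
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_QUERYBUF, &buf);
	mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, buf.m.offset);

	/* Queue the buffer and start the pipeline; in sensor-to-memory mode
	 * STREAMON validates the pipeline and starts it immediately. */
	ioctl(fd, VIDIOC_QBUF, &buf);
	ioctl(fd, VIDIOC_STREAMON, &type);

	/* Block until one frame has been captured, then stop. */
	ioctl(fd, VIDIOC_DQBUF, &buf);
	ioctl(fd, VIDIOC_STREAMOFF, &type);

	munmap(mem, buf.length);
	close(fd);
	return 0;
}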
951static int
952isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
953{
954 struct isp_video_fh *vfh = to_isp_video_fh(fh);
955 struct isp_video *video = video_drvdata(file);
956 enum isp_pipeline_state state;
957 struct isp_pipeline *pipe;
958 struct isp_video *far_end;
959 unsigned long flags;
960 int ret;
961
962 if (type != video->type)
963 return -EINVAL;
964
965 mutex_lock(&video->stream_lock);
966
967 if (video->streaming) {
968 mutex_unlock(&video->stream_lock);
969 return -EBUSY;
970 }
971
972 /* Start streaming on the pipeline. No link touching an entity in the
973 * pipeline can be activated or deactivated once streaming is started.
974 */
975 pipe = video->video.entity.pipe
976 ? to_isp_pipeline(&video->video.entity) : &video->pipe;
977 media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
978
979 /* Verify that the currently configured format matches the output of
980 * the connected subdev.
981 */
982 ret = isp_video_check_format(video, vfh);
983 if (ret < 0)
984 goto error;
985
986 video->bpl_padding = ret;
987 video->bpl_value = vfh->format.fmt.pix.bytesperline;
988
989 /* Find the ISP video node connected at the far end of the pipeline and
990 * update the pipeline.
991 */
992 far_end = isp_video_far_end(video);
993
994 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
995 state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
996 pipe->input = far_end;
997 pipe->output = video;
998 } else {
999 if (far_end == NULL) {
1000 ret = -EPIPE;
1001 goto error;
1002 }
1003
1004 state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
1005 pipe->input = video;
1006 pipe->output = far_end;
1007 }
1008
1009 if (video->isp->pdata->set_constraints)
1010 video->isp->pdata->set_constraints(video->isp, true);
1011 pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
1012
1013 /* Validate the pipeline and update its state. */
1014 ret = isp_video_validate_pipeline(pipe);
1015 if (ret < 0)
1016 goto error;
1017
1018 spin_lock_irqsave(&pipe->lock, flags);
1019 pipe->state &= ~ISP_PIPELINE_STREAM;
1020 pipe->state |= state;
1021 spin_unlock_irqrestore(&pipe->lock, flags);
1022
1023 /* Set the maximum time per frame as the value requested by userspace.
1024 * This is a soft limit that can be overridden if the hardware doesn't
1025	 * support the requested limit.
1026 */
1027 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1028 pipe->max_timeperframe = vfh->timeperframe;
1029
1030 video->queue = &vfh->queue;
1031 INIT_LIST_HEAD(&video->dmaqueue);
1032 atomic_set(&pipe->frame_number, -1);
1033
1034 ret = omap3isp_video_queue_streamon(&vfh->queue);
1035 if (ret < 0)
1036 goto error;
1037
1038 /* In sensor-to-memory mode, the stream can be started synchronously
1039 * to the stream on command. In memory-to-memory mode, it will be
1040 * started when buffers are queued on both the input and output.
1041 */
1042 if (pipe->input == NULL) {
1043 ret = omap3isp_pipeline_set_stream(pipe,
1044 ISP_PIPELINE_STREAM_CONTINUOUS);
1045 if (ret < 0)
1046 goto error;
1047 spin_lock_irqsave(&video->queue->irqlock, flags);
1048 if (list_empty(&video->dmaqueue))
1049 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
1050 spin_unlock_irqrestore(&video->queue->irqlock, flags);
1051 }
1052
1053error:
1054 if (ret < 0) {
1055 omap3isp_video_queue_streamoff(&vfh->queue);
1056 if (video->isp->pdata->set_constraints)
1057 video->isp->pdata->set_constraints(video->isp, false);
1058		media_entity_pipeline_stop(&video->video.entity);
1059 /* The DMA queue must be emptied here, otherwise CCDC interrupts
1060 * that will get triggered the next time the CCDC is powered up
1061 * will try to access buffers that might have been freed but
1062 * still present in the DMA queue. This can easily get triggered
1063 * if the above omap3isp_pipeline_set_stream() call fails on a
1064 * system with a free-running sensor.
1065 */
1066 INIT_LIST_HEAD(&video->dmaqueue);
1067 video->queue = NULL;
1068 }
1069
1070 if (!ret)
1071 video->streaming = 1;
1072
1073 mutex_unlock(&video->stream_lock);
1074 return ret;
1075}
1076
1077static int
1078isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
1079{
1080 struct isp_video_fh *vfh = to_isp_video_fh(fh);
1081 struct isp_video *video = video_drvdata(file);
1082 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
1083 enum isp_pipeline_state state;
1084 unsigned int streaming;
1085 unsigned long flags;
1086
1087 if (type != video->type)
1088 return -EINVAL;
1089
1090 mutex_lock(&video->stream_lock);
1091
1092	/* If the queue isn't streaming there is nothing to stop. */
1093 mutex_lock(&vfh->queue.lock);
1094 streaming = vfh->queue.streaming;
1095 mutex_unlock(&vfh->queue.lock);
1096
1097 if (!streaming)
1098 goto done;
1099
1100 /* Update the pipeline state. */
1101 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1102 state = ISP_PIPELINE_STREAM_OUTPUT
1103 | ISP_PIPELINE_QUEUE_OUTPUT;
1104 else
1105 state = ISP_PIPELINE_STREAM_INPUT
1106 | ISP_PIPELINE_QUEUE_INPUT;
1107
1108 spin_lock_irqsave(&pipe->lock, flags);
1109 pipe->state &= ~state;
1110 spin_unlock_irqrestore(&pipe->lock, flags);
1111
1112 /* Stop the stream. */
1113 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
1114 omap3isp_video_queue_streamoff(&vfh->queue);
1115 video->queue = NULL;
1116 video->streaming = 0;
1117
1118 if (video->isp->pdata->set_constraints)
1119 video->isp->pdata->set_constraints(video->isp, false);
1120 media_entity_pipeline_stop(&video->video.entity);
1121
1122done:
1123 mutex_unlock(&video->stream_lock);
1124 return 0;
1125}
1126
1127static int
1128isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1129{
1130 if (input->index > 0)
1131 return -EINVAL;
1132
1133 strlcpy(input->name, "camera", sizeof(input->name));
1134 input->type = V4L2_INPUT_TYPE_CAMERA;
1135
1136 return 0;
1137}
1138
1139static int
1140isp_video_g_input(struct file *file, void *fh, unsigned int *input)
1141{
1142 *input = 0;
1143
1144 return 0;
1145}
1146
1147static int
1148isp_video_s_input(struct file *file, void *fh, unsigned int input)
1149{
1150 return input == 0 ? 0 : -EINVAL;
1151}
1152
1153static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
1154 .vidioc_querycap = isp_video_querycap,
1155 .vidioc_g_fmt_vid_cap = isp_video_get_format,
1156 .vidioc_s_fmt_vid_cap = isp_video_set_format,
1157 .vidioc_try_fmt_vid_cap = isp_video_try_format,
1158 .vidioc_g_fmt_vid_out = isp_video_get_format,
1159 .vidioc_s_fmt_vid_out = isp_video_set_format,
1160 .vidioc_try_fmt_vid_out = isp_video_try_format,
1161 .vidioc_cropcap = isp_video_cropcap,
1162 .vidioc_g_crop = isp_video_get_crop,
1163 .vidioc_s_crop = isp_video_set_crop,
1164 .vidioc_g_parm = isp_video_get_param,
1165 .vidioc_s_parm = isp_video_set_param,
1166 .vidioc_reqbufs = isp_video_reqbufs,
1167 .vidioc_querybuf = isp_video_querybuf,
1168 .vidioc_qbuf = isp_video_qbuf,
1169 .vidioc_dqbuf = isp_video_dqbuf,
1170 .vidioc_streamon = isp_video_streamon,
1171 .vidioc_streamoff = isp_video_streamoff,
1172 .vidioc_enum_input = isp_video_enum_input,
1173 .vidioc_g_input = isp_video_g_input,
1174 .vidioc_s_input = isp_video_s_input,
1175};
1176
1177/* -----------------------------------------------------------------------------
1178 * V4L2 file operations
1179 */
1180
1181static int isp_video_open(struct file *file)
1182{
1183 struct isp_video *video = video_drvdata(file);
1184 struct isp_video_fh *handle;
1185 int ret = 0;
1186
1187 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1188 if (handle == NULL)
1189 return -ENOMEM;
1190
1191 v4l2_fh_init(&handle->vfh, &video->video);
1192 v4l2_fh_add(&handle->vfh);
1193
1194 /* If this is the first user, initialise the pipeline. */
1195 if (omap3isp_get(video->isp) == NULL) {
1196 ret = -EBUSY;
1197 goto done;
1198 }
1199
1200 ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
1201 if (ret < 0) {
1202 omap3isp_put(video->isp);
1203 goto done;
1204 }
1205
1206 omap3isp_video_queue_init(&handle->queue, video->type,
1207 &isp_video_queue_ops, video->isp->dev,
1208 sizeof(struct isp_buffer));
1209
1210 memset(&handle->format, 0, sizeof(handle->format));
1211 handle->format.type = video->type;
1212 handle->timeperframe.denominator = 1;
1213
1214 handle->video = video;
1215 file->private_data = &handle->vfh;
1216
1217done:
1218 if (ret < 0) {
1219 v4l2_fh_del(&handle->vfh);
1220 kfree(handle);
1221 }
1222
1223 return ret;
1224}
1225
1226static int isp_video_release(struct file *file)
1227{
1228 struct isp_video *video = video_drvdata(file);
1229 struct v4l2_fh *vfh = file->private_data;
1230 struct isp_video_fh *handle = to_isp_video_fh(vfh);
1231
1232 /* Disable streaming and free the buffers queue resources. */
1233 isp_video_streamoff(file, vfh, video->type);
1234
1235 mutex_lock(&handle->queue.lock);
1236 omap3isp_video_queue_cleanup(&handle->queue);
1237 mutex_unlock(&handle->queue.lock);
1238
1239 omap3isp_pipeline_pm_use(&video->video.entity, 0);
1240
1241 /* Release the file handle. */
1242 v4l2_fh_del(vfh);
1243 kfree(handle);
1244 file->private_data = NULL;
1245
1246 omap3isp_put(video->isp);
1247
1248 return 0;
1249}
1250
1251static unsigned int isp_video_poll(struct file *file, poll_table *wait)
1252{
1253 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1254 struct isp_video_queue *queue = &vfh->queue;
1255
1256 return omap3isp_video_queue_poll(queue, file, wait);
1257}
1258
1259static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
1260{
1261 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1262
1263 return omap3isp_video_queue_mmap(&vfh->queue, vma);
1264}
1265
1266static struct v4l2_file_operations isp_video_fops = {
1267 .owner = THIS_MODULE,
1268 .unlocked_ioctl = video_ioctl2,
1269 .open = isp_video_open,
1270 .release = isp_video_release,
1271 .poll = isp_video_poll,
1272 .mmap = isp_video_mmap,
1273};
1274
1275/* -----------------------------------------------------------------------------
1276 * ISP video core
1277 */
1278
1279static const struct isp_video_operations isp_video_dummy_ops = {
1280};
1281
1282int omap3isp_video_init(struct isp_video *video, const char *name)
1283{
1284 const char *direction;
1285 int ret;
1286
1287 switch (video->type) {
1288 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
1289 direction = "output";
1290 video->pad.flags = MEDIA_PAD_FL_SINK;
1291 break;
1292 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1293 direction = "input";
1294 video->pad.flags = MEDIA_PAD_FL_SOURCE;
1295 break;
1296
1297 default:
1298 return -EINVAL;
1299 }
1300
1301 ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
1302 if (ret < 0)
1303 return ret;
1304
1305 mutex_init(&video->mutex);
1306 atomic_set(&video->active, 0);
1307
1308 spin_lock_init(&video->pipe.lock);
1309 mutex_init(&video->stream_lock);
1310
1311 /* Initialize the video device. */
1312 if (video->ops == NULL)
1313 video->ops = &isp_video_dummy_ops;
1314
1315 video->video.fops = &isp_video_fops;
1316 snprintf(video->video.name, sizeof(video->video.name),
1317 "OMAP3 ISP %s %s", name, direction);
1318 video->video.vfl_type = VFL_TYPE_GRABBER;
1319 video->video.release = video_device_release_empty;
1320 video->video.ioctl_ops = &isp_video_ioctl_ops;
1321 video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;
1322
1323 video_set_drvdata(&video->video, video);
1324
1325 return 0;
1326}
1327
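For context, a hedged sketch of how an ISP module might use the init/register pair; the embedding structure, ops table and sizes are illustrative assumptions, not taken from a specific module:

static int example_register_capture_node(struct example_isp_module *module,
					 struct v4l2_device *vdev)
{
	struct isp_video *video = &module->video_out;	/* hypothetical embedding */
	int ret;

	video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* DMA engine writes to memory */
	video->ops = &example_video_ops;		/* must provide at least .queue */
	video->isp = module->isp;
	video->capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
	video->bpl_alignment = 32;			/* matches the 32-byte buffer constraint */

	ret = omap3isp_video_init(video, "EXAMPLE");
	if (ret < 0)
		return ret;

	return omap3isp_video_register(video, vdev);
}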
1328void omap3isp_video_cleanup(struct isp_video *video)
1329{
1330 media_entity_cleanup(&video->video.entity);
1331 mutex_destroy(&video->stream_lock);
1332 mutex_destroy(&video->mutex);
1333}
1334
1335int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
1336{
1337 int ret;
1338
1339 video->video.v4l2_dev = vdev;
1340
1341 ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
1342 if (ret < 0)
1343 printk(KERN_ERR "%s: could not register video device (%d)\n",
1344 __func__, ret);
1345
1346 return ret;
1347}
1348
1349void omap3isp_video_unregister(struct isp_video *video)
1350{
1351	if (video_is_registered(&video->video))
1352		video_unregister_device(&video->video);
1353}