[media] omap3isp: Add resizer data rate configuration to resizer_link_validate
[deliverable/linux.git] drivers/media/platform/omap3isp/ispvideo.c
1 /*
2 * ispvideo.c
3 *
4 * TI OMAP3 ISP - Generic video node
5 *
6 * Copyright (C) 2009-2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26 #include <asm/cacheflush.h>
27 #include <linux/clk.h>
28 #include <linux/mm.h>
29 #include <linux/module.h>
30 #include <linux/omap-iommu.h>
31 #include <linux/pagemap.h>
32 #include <linux/scatterlist.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/vmalloc.h>
36 #include <media/v4l2-dev.h>
37 #include <media/v4l2-ioctl.h>
38
39 #include "ispvideo.h"
40 #include "isp.h"
41
42
43 /* -----------------------------------------------------------------------------
44 * Helper functions
45 */
46
47 /*
48 * NOTE: When adding new media bus codes, always remember to add
49 * corresponding in-memory formats to the table below!!!
50 */
51 static struct isp_format_info formats[] = {
52 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
53 V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
54 V4L2_PIX_FMT_GREY, 8, 1, },
55 { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
56 V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
57 V4L2_PIX_FMT_Y10, 10, 2, },
58 { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
59 V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
60 V4L2_PIX_FMT_Y12, 12, 2, },
61 { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
62 V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
63 V4L2_PIX_FMT_SBGGR8, 8, 1, },
64 { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
65 V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
66 V4L2_PIX_FMT_SGBRG8, 8, 1, },
67 { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
68 V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
69 V4L2_PIX_FMT_SGRBG8, 8, 1, },
70 { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
71 V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
72 V4L2_PIX_FMT_SRGGB8, 8, 1, },
73 { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
74 V4L2_MBUS_FMT_SBGGR10_1X10, 0,
75 V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
76 { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
77 V4L2_MBUS_FMT_SGBRG10_1X10, 0,
78 V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
79 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
80 V4L2_MBUS_FMT_SGRBG10_1X10, 0,
81 V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
82 { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
83 V4L2_MBUS_FMT_SRGGB10_1X10, 0,
84 V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
85 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
86 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
87 V4L2_PIX_FMT_SBGGR10, 10, 2, },
88 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
89 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
90 V4L2_PIX_FMT_SGBRG10, 10, 2, },
91 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
92 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
93 V4L2_PIX_FMT_SGRBG10, 10, 2, },
94 { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
95 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
96 V4L2_PIX_FMT_SRGGB10, 10, 2, },
97 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
98 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
99 V4L2_PIX_FMT_SBGGR12, 12, 2, },
100 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
101 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
102 V4L2_PIX_FMT_SGBRG12, 12, 2, },
103 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
104 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
105 V4L2_PIX_FMT_SGRBG12, 12, 2, },
106 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
107 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
108 V4L2_PIX_FMT_SRGGB12, 12, 2, },
109 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
110 V4L2_MBUS_FMT_UYVY8_1X16, 0,
111 V4L2_PIX_FMT_UYVY, 16, 2, },
112 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
113 V4L2_MBUS_FMT_YUYV8_1X16, 0,
114 V4L2_PIX_FMT_YUYV, 16, 2, },
115 { V4L2_MBUS_FMT_UYVY8_2X8, V4L2_MBUS_FMT_UYVY8_2X8,
116 V4L2_MBUS_FMT_UYVY8_2X8, 0,
117 V4L2_PIX_FMT_UYVY, 8, 2, },
118 { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_MBUS_FMT_YUYV8_2X8,
119 V4L2_MBUS_FMT_YUYV8_2X8, 0,
120 V4L2_PIX_FMT_YUYV, 8, 2, },
121 /* Empty entry to catch the unsupported pixel code (0) used by the CCDC
122 * module and avoid NULL pointer dereferences.
123 */
124 { 0, }
125 };
126
127 const struct isp_format_info *
128 omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
129 {
130 unsigned int i;
131
132 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
133 if (formats[i].code == code)
134 return &formats[i];
135 }
136
137 return NULL;
138 }
139
140 /*
141 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
142 * @video: ISP video instance
143 * @mbus: v4l2_mbus_framefmt format (input)
144 * @pix: v4l2_pix_format format (output)
145 *
146 * Fill the output pix structure with information from the input mbus format.
147 * The bytesperline and sizeimage fields are computed from the requested bytes
148 * per line value in the pix format and information from the video instance.
149 *
150 * Return the number of padding bytes at end of line.
151 */
152 static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
153 const struct v4l2_mbus_framefmt *mbus,
154 struct v4l2_pix_format *pix)
155 {
156 unsigned int bpl = pix->bytesperline;
157 unsigned int min_bpl;
158 unsigned int i;
159
160 memset(pix, 0, sizeof(*pix));
161 pix->width = mbus->width;
162 pix->height = mbus->height;
163
164 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
165 if (formats[i].code == mbus->code)
166 break;
167 }
168
169 if (WARN_ON(i == ARRAY_SIZE(formats)))
170 return 0;
171
172 min_bpl = pix->width * formats[i].bpp;
173
174 /* Clamp the requested bytes per line value. If the maximum bytes per
175 * line value is zero, the module doesn't support user configurable line
176 * sizes. Override the requested value with the minimum in that case.
177 */
178 if (video->bpl_max)
179 bpl = clamp(bpl, min_bpl, video->bpl_max);
180 else
181 bpl = min_bpl;
182
183 if (!video->bpl_zero_padding || bpl != min_bpl)
184 bpl = ALIGN(bpl, video->bpl_alignment);
185
186 pix->pixelformat = formats[i].pixelformat;
187 pix->bytesperline = bpl;
188 pix->sizeimage = pix->bytesperline * pix->height;
189 pix->colorspace = mbus->colorspace;
190 pix->field = mbus->field;
191
192 return bpl - min_bpl;
193 }
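/*
 * Worked example (illustrative only; the width, alignment and padding values
 * below are assumptions, not taken from a specific board): for a 1010 pixel
 * wide V4L2_MBUS_FMT_SGRBG10_1X10 frame, formats[i].bpp is 2, so
 * min_bpl = 1010 * 2 = 2020 bytes. With a requested bytesperline of 0,
 * bpl_max set, bpl_zero_padding unset and bpl_alignment = 32, the clamped
 * value 2020 is rounded up to ALIGN(2020, 32) = 2048, giving 28 bytes of
 * padding per line and sizeimage = 2048 * height.
 */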
194
195 static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
196 struct v4l2_mbus_framefmt *mbus)
197 {
198 unsigned int i;
199
200 memset(mbus, 0, sizeof(*mbus));
201 mbus->width = pix->width;
202 mbus->height = pix->height;
203
204 /* Skip the last format in the loop so that it will be selected if no
205 * match is found.
206 */
207 for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
208 if (formats[i].pixelformat == pix->pixelformat)
209 break;
210 }
211
212 mbus->code = formats[i].code;
213 mbus->colorspace = pix->colorspace;
214 mbus->field = pix->field;
215 }
216
217 static struct v4l2_subdev *
218 isp_video_remote_subdev(struct isp_video *video, u32 *pad)
219 {
220 struct media_pad *remote;
221
222 remote = media_entity_remote_pad(&video->pad);
223
224 if (remote == NULL ||
225 media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
226 return NULL;
227
228 if (pad)
229 *pad = remote->index;
230
231 return media_entity_to_v4l2_subdev(remote->entity);
232 }
233
234 /* Collect the pipeline entities and find the video node at the far end of the pipeline. */
235 static int isp_video_get_graph_data(struct isp_video *video,
236 struct isp_pipeline *pipe)
237 {
238 struct media_entity_graph graph;
239 struct media_entity *entity = &video->video.entity;
240 struct media_device *mdev = entity->parent;
241 struct isp_video *far_end = NULL;
242
243 mutex_lock(&mdev->graph_mutex);
244 media_entity_graph_walk_start(&graph, entity);
245
246 while ((entity = media_entity_graph_walk_next(&graph))) {
247 struct isp_video *__video;
248
249 pipe->entities |= 1 << entity->id;
250
251 if (far_end != NULL)
252 continue;
253
254 if (entity == &video->video.entity)
255 continue;
256
257 if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
258 continue;
259
260 __video = to_isp_video(media_entity_to_video_device(entity));
261 if (__video->type != video->type)
262 far_end = __video;
263 }
264
265 mutex_unlock(&mdev->graph_mutex);
266
267 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
268 pipe->input = far_end;
269 pipe->output = video;
270 } else {
271 if (far_end == NULL)
272 return -EPIPE;
273
274 pipe->input = video;
275 pipe->output = far_end;
276 }
277
278 return 0;
279 }
280
281 static int
282 __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
283 {
284 struct v4l2_subdev_format fmt;
285 struct v4l2_subdev *subdev;
286 u32 pad;
287 int ret;
288
289 subdev = isp_video_remote_subdev(video, &pad);
290 if (subdev == NULL)
291 return -EINVAL;
292
293 mutex_lock(&video->mutex);
294
295 fmt.pad = pad;
296 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
297 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
298 if (ret == -ENOIOCTLCMD)
299 ret = -EINVAL;
300
301 mutex_unlock(&video->mutex);
302
303 if (ret)
304 return ret;
305
306 format->type = video->type;
307 return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
308 }
309
310 static int
311 isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
312 {
313 struct v4l2_format format;
314 int ret;
315
316 memcpy(&format, &vfh->format, sizeof(format));
317 ret = __isp_video_get_format(video, &format);
318 if (ret < 0)
319 return ret;
320
321 if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
322 vfh->format.fmt.pix.height != format.fmt.pix.height ||
323 vfh->format.fmt.pix.width != format.fmt.pix.width ||
324 vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
325 vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
326 return -EINVAL;
327
328 return ret;
329 }
330
331 /* -----------------------------------------------------------------------------
332 * IOMMU management
333 */
334
335 #define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
336
337 /*
338 * ispmmu_vmap - Map a scatter-gather list in the ISP MMU virtual address space
339 * @isp: Pointer to the OMAP3 ISP device
340 * @sglist: Pointer to the source scatter-gather list to map
341 * @sglen: Number of elements in the scatter-gather list
342 *
343 * Return the device address of the buffer as mapped by the ISP MMU, or
344 * -ENOMEM if the mapping failed.
345 */
346 static dma_addr_t
347 ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
348 {
349 struct sg_table *sgt;
350 u32 da;
351
352 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
353 if (sgt == NULL)
354 return -ENOMEM;
355
356 sgt->sgl = (struct scatterlist *)sglist;
357 sgt->nents = sglen;
358 sgt->orig_nents = sglen;
359
360 da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
361 if (IS_ERR_VALUE(da))
362 kfree(sgt);
363
364 return da;
365 }
366
367 /*
368 * ispmmu_vunmap - Unmap a device address from the ISP MMU
369 * @isp: Pointer to the OMAP3 ISP device
370 * @da: Device address returned by an ispmmu_vmap() call
371 */
372 static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
373 {
374 struct sg_table *sgt;
375
376 sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
377 kfree(sgt);
378 }
379
380 /* -----------------------------------------------------------------------------
381 * Video queue operations
382 */
383
384 static void isp_video_queue_prepare(struct isp_video_queue *queue,
385 unsigned int *nbuffers, unsigned int *size)
386 {
387 struct isp_video_fh *vfh =
388 container_of(queue, struct isp_video_fh, queue);
389 struct isp_video *video = vfh->video;
390
391 *size = vfh->format.fmt.pix.sizeimage;
392 if (*size == 0)
393 return;
394
395 *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
396 }
397
398 static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
399 {
400 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
401 struct isp_buffer *buffer = to_isp_buffer(buf);
402 struct isp_video *video = vfh->video;
403
404 if (buffer->isp_addr) {
405 ispmmu_vunmap(video->isp, buffer->isp_addr);
406 buffer->isp_addr = 0;
407 }
408 }
409
410 static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
411 {
412 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
413 struct isp_buffer *buffer = to_isp_buffer(buf);
414 struct isp_video *video = vfh->video;
415 unsigned long addr;
416
417 addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
418 if (IS_ERR_VALUE(addr))
419 return -EIO;
420
421 if (!IS_ALIGNED(addr, 32)) {
422 dev_dbg(video->isp->dev, "Buffer address must be "
423 "aligned to 32 bytes boundary.\n");
424 ispmmu_vunmap(video->isp, buffer->isp_addr);
425 return -EINVAL;
426 }
427
428 buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
429 buffer->isp_addr = addr;
430 return 0;
431 }
432
433 /*
434 * isp_video_buffer_queue - Add buffer to streaming queue
435 * @buf: Video buffer
436 *
437 * In memory-to-memory mode, start streaming on the pipeline if buffers are
438 * queued on both the input and the output, provided the pipeline isn't busy.
439 * If the pipeline is busy, it will be restarted in the output module interrupt
440 * handler.
441 */
442 static void isp_video_buffer_queue(struct isp_video_buffer *buf)
443 {
444 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
445 struct isp_buffer *buffer = to_isp_buffer(buf);
446 struct isp_video *video = vfh->video;
447 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
448 enum isp_pipeline_state state;
449 unsigned long flags;
450 unsigned int empty;
451 unsigned int start;
452
453 empty = list_empty(&video->dmaqueue);
454 list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
455
456 if (empty) {
457 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
458 state = ISP_PIPELINE_QUEUE_OUTPUT;
459 else
460 state = ISP_PIPELINE_QUEUE_INPUT;
461
462 spin_lock_irqsave(&pipe->lock, flags);
463 pipe->state |= state;
464 video->ops->queue(video, buffer);
465 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
466
467 start = isp_pipeline_ready(pipe);
468 if (start)
469 pipe->state |= ISP_PIPELINE_STREAM;
470 spin_unlock_irqrestore(&pipe->lock, flags);
471
472 if (start)
473 omap3isp_pipeline_set_stream(pipe,
474 ISP_PIPELINE_STREAM_SINGLESHOT);
475 }
476 }
477
478 static const struct isp_video_queue_operations isp_video_queue_ops = {
479 .queue_prepare = &isp_video_queue_prepare,
480 .buffer_prepare = &isp_video_buffer_prepare,
481 .buffer_queue = &isp_video_buffer_queue,
482 .buffer_cleanup = &isp_video_buffer_cleanup,
483 };
484
485 /*
486 * omap3isp_video_buffer_next - Complete the current buffer and return the next
487 * @video: ISP video object
488 *
489 * Remove the current video buffer from the DMA queue and fill its timestamp,
490 * field count and state fields before waking up its completion handler.
491 *
492 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
493 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
494 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
495 *
496 * The DMA queue is expected to contain at least one buffer.
497 *
498 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
499 * empty.
500 */
501 struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
502 {
503 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
504 struct isp_video_queue *queue = video->queue;
505 enum isp_pipeline_state state;
506 struct isp_video_buffer *buf;
507 unsigned long flags;
508 struct timespec ts;
509
510 spin_lock_irqsave(&queue->irqlock, flags);
511 if (WARN_ON(list_empty(&video->dmaqueue))) {
512 spin_unlock_irqrestore(&queue->irqlock, flags);
513 return NULL;
514 }
515
516 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
517 irqlist);
518 list_del(&buf->irqlist);
519 spin_unlock_irqrestore(&queue->irqlock, flags);
520
521 ktime_get_ts(&ts);
522 buf->vbuf.timestamp.tv_sec = ts.tv_sec;
523 buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
524
525 /* Do frame number propagation only if this is the output video node.
526 * Frame number either comes from the CSI receivers or it gets
527 * incremented here if H3A is not active.
528 * Note: There is no guarantee that the output buffer will finish
529 * first, so the input number might lag behind by 1 in some cases.
530 */
531 if (video == pipe->output && !pipe->do_propagation)
532 buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
533 else
534 buf->vbuf.sequence = atomic_read(&pipe->frame_number);
535
536 /* Report pipeline errors to userspace on the capture device side. */
537 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
538 buf->state = ISP_BUF_STATE_ERROR;
539 pipe->error = false;
540 } else {
541 buf->state = ISP_BUF_STATE_DONE;
542 }
543
544 wake_up(&buf->wait);
545
546 if (list_empty(&video->dmaqueue)) {
547 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
548 state = ISP_PIPELINE_QUEUE_OUTPUT
549 | ISP_PIPELINE_STREAM;
550 else
551 state = ISP_PIPELINE_QUEUE_INPUT
552 | ISP_PIPELINE_STREAM;
553
554 spin_lock_irqsave(&pipe->lock, flags);
555 pipe->state &= ~state;
556 if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
557 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
558 spin_unlock_irqrestore(&pipe->lock, flags);
559 return NULL;
560 }
561
562 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
563 spin_lock_irqsave(&pipe->lock, flags);
564 pipe->state &= ~ISP_PIPELINE_STREAM;
565 spin_unlock_irqrestore(&pipe->lock, flags);
566 }
567
568 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
569 irqlist);
570 buf->state = ISP_BUF_STATE_ACTIVE;
571 return to_isp_buffer(buf);
572 }
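/*
 * Usage sketch (illustrative, not part of this file): the ISP modules call
 * this helper from their interrupt handlers to complete the current buffer
 * and program the address of the next one. For the CCDC output path this
 * looks roughly as follows; ccdc_set_outaddr() stands for the module's own
 * register write helper:
 *
 *	struct isp_buffer *buffer;
 *
 *	buffer = omap3isp_video_buffer_next(&ccdc->video_out);
 *	if (buffer != NULL)
 *		ccdc_set_outaddr(ccdc, buffer->isp_addr);
 *
 * A NULL return value means the DMA queue is now empty and the module has to
 * wait for new buffers to be queued before restarting.
 */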
573
574 /*
575 * omap3isp_video_resume - Perform resume operation on the buffers
576 * @video: ISP video object
577 * @continuous: Pipeline is in single shot mode if 0, or in continuous mode otherwise
578 *
579 * This function is intended to be used in suspend/resume scenarios. It requests
580 * the video queue layer to discard buffers marked as DONE if the pipeline is in
581 * continuous mode, and requests the ISP modules to requeue the ACTIVE buffer if
582 * there is one.
583 */
584 void omap3isp_video_resume(struct isp_video *video, int continuous)
585 {
586 struct isp_buffer *buf = NULL;
587
588 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
589 omap3isp_video_queue_discard_done(video->queue);
590
591 if (!list_empty(&video->dmaqueue)) {
592 buf = list_first_entry(&video->dmaqueue,
593 struct isp_buffer, buffer.irqlist);
594 video->ops->queue(video, buf);
595 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
596 } else {
597 if (continuous)
598 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
599 }
600 }
601
602 /* -----------------------------------------------------------------------------
603 * V4L2 ioctls
604 */
605
606 static int
607 isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
608 {
609 struct isp_video *video = video_drvdata(file);
610
611 strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
612 strlcpy(cap->card, video->video.name, sizeof(cap->card));
613 strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
614
615 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
616 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
617 else
618 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
619
620 return 0;
621 }
622
623 static int
624 isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
625 {
626 struct isp_video_fh *vfh = to_isp_video_fh(fh);
627 struct isp_video *video = video_drvdata(file);
628
629 if (format->type != video->type)
630 return -EINVAL;
631
632 mutex_lock(&video->mutex);
633 *format = vfh->format;
634 mutex_unlock(&video->mutex);
635
636 return 0;
637 }
638
639 static int
640 isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
641 {
642 struct isp_video_fh *vfh = to_isp_video_fh(fh);
643 struct isp_video *video = video_drvdata(file);
644 struct v4l2_mbus_framefmt fmt;
645
646 if (format->type != video->type)
647 return -EINVAL;
648
649 mutex_lock(&video->mutex);
650
651 /* Fill the bytesperline and sizeimage fields by converting to media bus
652 * format and back to pixel format.
653 */
654 isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
655 isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
656
657 vfh->format = *format;
658
659 mutex_unlock(&video->mutex);
660 return 0;
661 }
662
663 static int
664 isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
665 {
666 struct isp_video *video = video_drvdata(file);
667 struct v4l2_subdev_format fmt;
668 struct v4l2_subdev *subdev;
669 u32 pad;
670 int ret;
671
672 if (format->type != video->type)
673 return -EINVAL;
674
675 subdev = isp_video_remote_subdev(video, &pad);
676 if (subdev == NULL)
677 return -EINVAL;
678
679 isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
680
681 fmt.pad = pad;
682 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
683 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
684 if (ret)
685 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
686
687 isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
688 return 0;
689 }
690
691 static int
692 isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
693 {
694 struct isp_video *video = video_drvdata(file);
695 struct v4l2_subdev *subdev;
696 int ret;
697
698 subdev = isp_video_remote_subdev(video, NULL);
699 if (subdev == NULL)
700 return -EINVAL;
701
702 mutex_lock(&video->mutex);
703 ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
704 mutex_unlock(&video->mutex);
705
706 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
707 }
708
709 static int
710 isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
711 {
712 struct isp_video *video = video_drvdata(file);
713 struct v4l2_subdev_format format;
714 struct v4l2_subdev *subdev;
715 u32 pad;
716 int ret;
717
718 subdev = isp_video_remote_subdev(video, &pad);
719 if (subdev == NULL)
720 return -EINVAL;
721
722 /* Try the get crop operation first and fallback to get format if not
723 * implemented.
724 */
725 ret = v4l2_subdev_call(subdev, video, g_crop, crop);
726 if (ret != -ENOIOCTLCMD)
727 return ret;
728
729 format.pad = pad;
730 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
731 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
732 if (ret < 0)
733 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
734
735 crop->c.left = 0;
736 crop->c.top = 0;
737 crop->c.width = format.format.width;
738 crop->c.height = format.format.height;
739
740 return 0;
741 }
742
743 static int
744 isp_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
745 {
746 struct isp_video *video = video_drvdata(file);
747 struct v4l2_subdev *subdev;
748 int ret;
749
750 subdev = isp_video_remote_subdev(video, NULL);
751 if (subdev == NULL)
752 return -EINVAL;
753
754 mutex_lock(&video->mutex);
755 ret = v4l2_subdev_call(subdev, video, s_crop, crop);
756 mutex_unlock(&video->mutex);
757
758 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
759 }
760
761 static int
762 isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
763 {
764 struct isp_video_fh *vfh = to_isp_video_fh(fh);
765 struct isp_video *video = video_drvdata(file);
766
767 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
768 video->type != a->type)
769 return -EINVAL;
770
771 memset(a, 0, sizeof(*a));
772 a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
773 a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
774 a->parm.output.timeperframe = vfh->timeperframe;
775
776 return 0;
777 }
778
779 static int
780 isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
781 {
782 struct isp_video_fh *vfh = to_isp_video_fh(fh);
783 struct isp_video *video = video_drvdata(file);
784
785 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
786 video->type != a->type)
787 return -EINVAL;
788
789 if (a->parm.output.timeperframe.denominator == 0)
790 a->parm.output.timeperframe.denominator = 1;
791
792 vfh->timeperframe = a->parm.output.timeperframe;
793
794 return 0;
795 }
796
797 static int
798 isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
799 {
800 struct isp_video_fh *vfh = to_isp_video_fh(fh);
801
802 return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
803 }
804
805 static int
806 isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
807 {
808 struct isp_video_fh *vfh = to_isp_video_fh(fh);
809
810 return omap3isp_video_queue_querybuf(&vfh->queue, b);
811 }
812
813 static int
814 isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
815 {
816 struct isp_video_fh *vfh = to_isp_video_fh(fh);
817
818 return omap3isp_video_queue_qbuf(&vfh->queue, b);
819 }
820
821 static int
822 isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
823 {
824 struct isp_video_fh *vfh = to_isp_video_fh(fh);
825
826 return omap3isp_video_queue_dqbuf(&vfh->queue, b,
827 file->f_flags & O_NONBLOCK);
828 }
829
830 static int isp_video_check_external_subdevs(struct isp_video *video,
831 struct isp_pipeline *pipe)
832 {
833 struct isp_device *isp = video->isp;
834 struct media_entity *ents[] = {
835 &isp->isp_csi2a.subdev.entity,
836 &isp->isp_csi2c.subdev.entity,
837 &isp->isp_ccp2.subdev.entity,
838 &isp->isp_ccdc.subdev.entity
839 };
840 struct media_pad *source_pad;
841 struct media_entity *source = NULL;
842 struct media_entity *sink;
843 struct v4l2_subdev_format fmt;
844 struct v4l2_ext_controls ctrls;
845 struct v4l2_ext_control ctrl;
846 unsigned int i;
847 int ret = 0;
848
849 for (i = 0; i < ARRAY_SIZE(ents); i++) {
850 /* Is the entity part of the pipeline? */
851 if (!(pipe->entities & (1 << ents[i]->id)))
852 continue;
853
854 /* ISP entities always have their sink pad at index 0. Find the source. */
855 source_pad = media_entity_remote_pad(&ents[i]->pads[0]);
856 if (source_pad == NULL)
857 continue;
858
859 source = source_pad->entity;
860 sink = ents[i];
861 break;
862 }
863
864 if (!source) {
865 dev_warn(isp->dev, "can't find source, failing now\n");
866 return ret;
867 }
868
869 if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
870 return 0;
871
872 pipe->external = media_entity_to_v4l2_subdev(source);
873
874 fmt.pad = source_pad->index;
875 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
876 ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
877 pad, get_fmt, NULL, &fmt);
878 if (unlikely(ret < 0)) {
879 dev_warn(isp->dev, "get_fmt returned null!\n");
880 return ret;
881 }
882
883 pipe->external_width =
884 omap3isp_video_format_info(fmt.format.code)->width;
885
886 memset(&ctrls, 0, sizeof(ctrls));
887 memset(&ctrl, 0, sizeof(ctrl));
888
889 ctrl.id = V4L2_CID_PIXEL_RATE;
890
891 ctrls.count = 1;
892 ctrls.controls = &ctrl;
893
894 ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
895 if (ret < 0) {
896 dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
897 pipe->external->name);
898 return ret;
899 }
900
901 pipe->external_rate = ctrl.value64;
902
903 if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id)) {
904 unsigned int rate = UINT_MAX;
905 /*
906 * Check that the pixel rate of the external source doesn't
907 * exceed the maximum pixel rate supported by the CCDC.
908 */
909 omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
910 if (pipe->external_rate > rate)
911 return -ENOSPC;
912 }
913
914 return 0;
915 }
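/*
 * Illustrative sketch (not part of this driver): the external subdev at the
 * pipeline input is expected to expose a V4L2_CID_PIXEL_RATE control so that
 * the check above can read it through v4l2_g_ext_ctrls(). A hypothetical
 * sensor driver could register such a control roughly as follows; the
 * example_sensor names and the 96 MHz default are assumptions:
 *
 *	#include <media/v4l2-ctrls.h>
 *
 *	static int example_sensor_init_controls(struct example_sensor *sensor)
 *	{
 *		v4l2_ctrl_handler_init(&sensor->ctrls, 1);
 *		v4l2_ctrl_new_std(&sensor->ctrls, &example_sensor_ctrl_ops,
 *				  V4L2_CID_PIXEL_RATE, 1, INT_MAX, 1, 96000000);
 *		if (sensor->ctrls.error)
 *			return sensor->ctrls.error;
 *		sensor->subdev.ctrl_handler = &sensor->ctrls;
 *		return 0;
 *	}
 */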
916
917 /*
918 * Stream management
919 *
920 * Every ISP pipeline has a single input and a single output. The input can be
921 * either a sensor or a video node. The output is always a video node.
922 *
923 * As every pipeline has an output video node, the ISP video object at the
924 * pipeline output stores the pipeline state. It tracks the streaming state of
925 * both the input and output, as well as the availability of buffers.
926 *
927 * In sensor-to-memory mode, frames are always available at the pipeline input.
928 * Starting the sensor usually requires I2C transfers and must be done in
929 * an interruptible context. The pipeline is started and stopped synchronously
930 * to the stream on/off commands. All modules in the pipeline will get their
931 * subdev set stream handler called. The module at the end of the pipeline must
932 * delay starting the hardware until buffers are available at its output.
933 *
934 * In memory-to-memory mode, starting/stopping the stream requires
935 * synchronization between the input and output. ISP modules can't be stopped
936 * in the middle of a frame, and at least some of the modules seem to become
937 * busy as soon as they're started, even if they don't receive a frame start
938 * event. For that reason frames need to be processed in single-shot mode. The
939 * driver needs to wait until a frame is completely processed and written to
940 * memory before restarting the pipeline for the next frame. Pipelined
941 * processing might be possible but requires more testing.
942 *
943 * Stream start must be delayed until buffers are available at both the input
944 * and output. The pipeline must be started in the videobuf queue callback with
945 * the buffer queue spinlock held. The modules' subdev set stream operation must
946 * not sleep.
947 */
948 static int
949 isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
950 {
951 struct isp_video_fh *vfh = to_isp_video_fh(fh);
952 struct isp_video *video = video_drvdata(file);
953 enum isp_pipeline_state state;
954 struct isp_pipeline *pipe;
955 unsigned long flags;
956 int ret;
957
958 if (type != video->type)
959 return -EINVAL;
960
961 mutex_lock(&video->stream_lock);
962
963 if (video->streaming) {
964 mutex_unlock(&video->stream_lock);
965 return -EBUSY;
966 }
967
968 /* Start streaming on the pipeline. No link touching an entity in the
969 * pipeline can be activated or deactivated once streaming is started.
970 */
971 pipe = video->video.entity.pipe
972 ? to_isp_pipeline(&video->video.entity) : &video->pipe;
973
974 pipe->entities = 0;
975
976 if (video->isp->pdata->set_constraints)
977 video->isp->pdata->set_constraints(video->isp, true);
978 pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
979 pipe->max_rate = pipe->l3_ick;
980
981 ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
982 if (ret < 0)
983 goto err_pipeline_start;
984
985 /* Verify that the currently configured format matches the output of
986 * the connected subdev.
987 */
988 ret = isp_video_check_format(video, vfh);
989 if (ret < 0)
990 goto err_check_format;
991
992 video->bpl_padding = ret;
993 video->bpl_value = vfh->format.fmt.pix.bytesperline;
994
995 ret = isp_video_get_graph_data(video, pipe);
996 if (ret < 0)
997 goto err_check_format;
998
999 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1000 state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
1001 else
1002 state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
1003
1004 ret = isp_video_check_external_subdevs(video, pipe);
1005 if (ret < 0)
1006 goto err_check_format;
1007
1008 pipe->error = false;
1009
1010 spin_lock_irqsave(&pipe->lock, flags);
1011 pipe->state &= ~ISP_PIPELINE_STREAM;
1012 pipe->state |= state;
1013 spin_unlock_irqrestore(&pipe->lock, flags);
1014
1015 /* Set the maximum time per frame as the value requested by userspace.
1016 * This is a soft limit that can be overridden if the hardware doesn't
1017 * support the requested limit.
1018 */
1019 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1020 pipe->max_timeperframe = vfh->timeperframe;
1021
1022 video->queue = &vfh->queue;
1023 INIT_LIST_HEAD(&video->dmaqueue);
1024 atomic_set(&pipe->frame_number, -1);
1025
1026 ret = omap3isp_video_queue_streamon(&vfh->queue);
1027 if (ret < 0)
1028 goto err_check_format;
1029
1030 /* In sensor-to-memory mode, the stream can be started synchronously
1031 * to the stream on command. In memory-to-memory mode, it will be
1032 * started when buffers are queued on both the input and output.
1033 */
1034 if (pipe->input == NULL) {
1035 ret = omap3isp_pipeline_set_stream(pipe,
1036 ISP_PIPELINE_STREAM_CONTINUOUS);
1037 if (ret < 0)
1038 goto err_set_stream;
1039 spin_lock_irqsave(&video->queue->irqlock, flags);
1040 if (list_empty(&video->dmaqueue))
1041 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
1042 spin_unlock_irqrestore(&video->queue->irqlock, flags);
1043 }
1044
1045 video->streaming = 1;
1046
1047 mutex_unlock(&video->stream_lock);
1048 return 0;
1049
1050 err_set_stream:
1051 omap3isp_video_queue_streamoff(&vfh->queue);
1052 err_check_format:
1053 media_entity_pipeline_stop(&video->video.entity);
1054 err_pipeline_start:
1055 if (video->isp->pdata->set_constraints)
1056 video->isp->pdata->set_constraints(video->isp, false);
1057 /* The DMA queue must be emptied here, otherwise CCDC interrupts that
1058 * will get triggered the next time the CCDC is powered up will try to
1059 * access buffers that might have been freed but are still present in the
1060 * DMA queue. This can easily get triggered if the above
1061 * omap3isp_pipeline_set_stream() call fails on a system with a
1062 * free-running sensor.
1063 */
1064 INIT_LIST_HEAD(&video->dmaqueue);
1065 video->queue = NULL;
1066
1067 mutex_unlock(&video->stream_lock);
1068 return ret;
1069 }
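/*
 * Userspace view (illustrative sketch; the device node path, resolution and
 * buffer count are assumptions): the sensor-to-memory streaming path above is
 * typically driven by a V4L2 application using the standard MMAP capture
 * sequence, after the subdev pad formats have been configured through the
 * media controller so that isp_video_check_format() succeeds:
 *
 *	int fd = open("/dev/video2", O_RDWR);
 *
 *	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
 *	fmt.fmt.pix.width = 640;
 *	fmt.fmt.pix.height = 480;
 *	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SGRBG10;
 *	ioctl(fd, VIDIOC_S_FMT, &fmt);
 *
 *	struct v4l2_requestbuffers rb = { .count = 4,
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP };
 *	ioctl(fd, VIDIOC_REQBUFS, &rb);
 *	// VIDIOC_QUERYBUF + mmap() + VIDIOC_QBUF for each buffer ...
 *
 *	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	ioctl(fd, VIDIOC_STREAMON, &type);
 *	// VIDIOC_DQBUF / VIDIOC_QBUF loop ...
 *	ioctl(fd, VIDIOC_STREAMOFF, &type);
 */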
1070
1071 static int
1072 isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
1073 {
1074 struct isp_video_fh *vfh = to_isp_video_fh(fh);
1075 struct isp_video *video = video_drvdata(file);
1076 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
1077 enum isp_pipeline_state state;
1078 unsigned int streaming;
1079 unsigned long flags;
1080
1081 if (type != video->type)
1082 return -EINVAL;
1083
1084 mutex_lock(&video->stream_lock);
1085
1086 /* Make sure the video node is actually streaming before stopping it. */
1087 mutex_lock(&vfh->queue.lock);
1088 streaming = vfh->queue.streaming;
1089 mutex_unlock(&vfh->queue.lock);
1090
1091 if (!streaming)
1092 goto done;
1093
1094 /* Update the pipeline state. */
1095 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1096 state = ISP_PIPELINE_STREAM_OUTPUT
1097 | ISP_PIPELINE_QUEUE_OUTPUT;
1098 else
1099 state = ISP_PIPELINE_STREAM_INPUT
1100 | ISP_PIPELINE_QUEUE_INPUT;
1101
1102 spin_lock_irqsave(&pipe->lock, flags);
1103 pipe->state &= ~state;
1104 spin_unlock_irqrestore(&pipe->lock, flags);
1105
1106 /* Stop the stream. */
1107 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
1108 omap3isp_video_queue_streamoff(&vfh->queue);
1109 video->queue = NULL;
1110 video->streaming = 0;
1111
1112 if (video->isp->pdata->set_constraints)
1113 video->isp->pdata->set_constraints(video->isp, false);
1114 media_entity_pipeline_stop(&video->video.entity);
1115
1116 done:
1117 mutex_unlock(&video->stream_lock);
1118 return 0;
1119 }
1120
1121 static int
1122 isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1123 {
1124 if (input->index > 0)
1125 return -EINVAL;
1126
1127 strlcpy(input->name, "camera", sizeof(input->name));
1128 input->type = V4L2_INPUT_TYPE_CAMERA;
1129
1130 return 0;
1131 }
1132
1133 static int
1134 isp_video_g_input(struct file *file, void *fh, unsigned int *input)
1135 {
1136 *input = 0;
1137
1138 return 0;
1139 }
1140
1141 static int
1142 isp_video_s_input(struct file *file, void *fh, unsigned int input)
1143 {
1144 return input == 0 ? 0 : -EINVAL;
1145 }
1146
1147 static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
1148 .vidioc_querycap = isp_video_querycap,
1149 .vidioc_g_fmt_vid_cap = isp_video_get_format,
1150 .vidioc_s_fmt_vid_cap = isp_video_set_format,
1151 .vidioc_try_fmt_vid_cap = isp_video_try_format,
1152 .vidioc_g_fmt_vid_out = isp_video_get_format,
1153 .vidioc_s_fmt_vid_out = isp_video_set_format,
1154 .vidioc_try_fmt_vid_out = isp_video_try_format,
1155 .vidioc_cropcap = isp_video_cropcap,
1156 .vidioc_g_crop = isp_video_get_crop,
1157 .vidioc_s_crop = isp_video_set_crop,
1158 .vidioc_g_parm = isp_video_get_param,
1159 .vidioc_s_parm = isp_video_set_param,
1160 .vidioc_reqbufs = isp_video_reqbufs,
1161 .vidioc_querybuf = isp_video_querybuf,
1162 .vidioc_qbuf = isp_video_qbuf,
1163 .vidioc_dqbuf = isp_video_dqbuf,
1164 .vidioc_streamon = isp_video_streamon,
1165 .vidioc_streamoff = isp_video_streamoff,
1166 .vidioc_enum_input = isp_video_enum_input,
1167 .vidioc_g_input = isp_video_g_input,
1168 .vidioc_s_input = isp_video_s_input,
1169 };
1170
1171 /* -----------------------------------------------------------------------------
1172 * V4L2 file operations
1173 */
1174
1175 static int isp_video_open(struct file *file)
1176 {
1177 struct isp_video *video = video_drvdata(file);
1178 struct isp_video_fh *handle;
1179 int ret = 0;
1180
1181 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1182 if (handle == NULL)
1183 return -ENOMEM;
1184
1185 v4l2_fh_init(&handle->vfh, &video->video);
1186 v4l2_fh_add(&handle->vfh);
1187
1188 /* Get a reference to the ISP device, powering it up if this is its first user. */
1189 if (omap3isp_get(video->isp) == NULL) {
1190 ret = -EBUSY;
1191 goto done;
1192 }
1193
1194 ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
1195 if (ret < 0) {
1196 omap3isp_put(video->isp);
1197 goto done;
1198 }
1199
1200 omap3isp_video_queue_init(&handle->queue, video->type,
1201 &isp_video_queue_ops, video->isp->dev,
1202 sizeof(struct isp_buffer));
1203
1204 memset(&handle->format, 0, sizeof(handle->format));
1205 handle->format.type = video->type;
1206 handle->timeperframe.denominator = 1;
1207
1208 handle->video = video;
1209 file->private_data = &handle->vfh;
1210
1211 done:
1212 if (ret < 0) {
1213 v4l2_fh_del(&handle->vfh);
1214 kfree(handle);
1215 }
1216
1217 return ret;
1218 }
1219
1220 static int isp_video_release(struct file *file)
1221 {
1222 struct isp_video *video = video_drvdata(file);
1223 struct v4l2_fh *vfh = file->private_data;
1224 struct isp_video_fh *handle = to_isp_video_fh(vfh);
1225
1226 /* Disable streaming and free the buffers queue resources. */
1227 isp_video_streamoff(file, vfh, video->type);
1228
1229 mutex_lock(&handle->queue.lock);
1230 omap3isp_video_queue_cleanup(&handle->queue);
1231 mutex_unlock(&handle->queue.lock);
1232
1233 omap3isp_pipeline_pm_use(&video->video.entity, 0);
1234
1235 /* Release the file handle. */
1236 v4l2_fh_del(vfh);
1237 kfree(handle);
1238 file->private_data = NULL;
1239
1240 omap3isp_put(video->isp);
1241
1242 return 0;
1243 }
1244
1245 static unsigned int isp_video_poll(struct file *file, poll_table *wait)
1246 {
1247 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1248 struct isp_video_queue *queue = &vfh->queue;
1249
1250 return omap3isp_video_queue_poll(queue, file, wait);
1251 }
1252
1253 static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
1254 {
1255 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1256
1257 return omap3isp_video_queue_mmap(&vfh->queue, vma);
1258 }
1259
1260 static struct v4l2_file_operations isp_video_fops = {
1261 .owner = THIS_MODULE,
1262 .unlocked_ioctl = video_ioctl2,
1263 .open = isp_video_open,
1264 .release = isp_video_release,
1265 .poll = isp_video_poll,
1266 .mmap = isp_video_mmap,
1267 };
1268
1269 /* -----------------------------------------------------------------------------
1270 * ISP video core
1271 */
1272
1273 static const struct isp_video_operations isp_video_dummy_ops = {
1274 };
1275
1276 int omap3isp_video_init(struct isp_video *video, const char *name)
1277 {
1278 const char *direction;
1279 int ret;
1280
1281 switch (video->type) {
1282 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
1283 direction = "output";
1284 video->pad.flags = MEDIA_PAD_FL_SINK
1285 | MEDIA_PAD_FL_MUST_CONNECT;
1286 break;
1287 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1288 direction = "input";
1289 video->pad.flags = MEDIA_PAD_FL_SOURCE
1290 | MEDIA_PAD_FL_MUST_CONNECT;
1291 video->video.vfl_dir = VFL_DIR_TX;
1292 break;
1293
1294 default:
1295 return -EINVAL;
1296 }
1297
1298 ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
1299 if (ret < 0)
1300 return ret;
1301
1302 mutex_init(&video->mutex);
1303 atomic_set(&video->active, 0);
1304
1305 spin_lock_init(&video->pipe.lock);
1306 mutex_init(&video->stream_lock);
1307
1308 /* Initialize the video device. */
1309 if (video->ops == NULL)
1310 video->ops = &isp_video_dummy_ops;
1311
1312 video->video.fops = &isp_video_fops;
1313 snprintf(video->video.name, sizeof(video->video.name),
1314 "OMAP3 ISP %s %s", name, direction);
1315 video->video.vfl_type = VFL_TYPE_GRABBER;
1316 video->video.release = video_device_release_empty;
1317 video->video.ioctl_ops = &isp_video_ioctl_ops;
1318 video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;
1319
1320 video_set_drvdata(&video->video, video);
1321
1322 return 0;
1323 }
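/*
 * Usage sketch (illustrative, not part of this file): each ISP module embeds
 * a struct isp_video and fills in its type, operations and buffer constraints
 * before calling omap3isp_video_init() and, later, omap3isp_video_register().
 * The CCDC output node does roughly the following; the capture_mem and
 * bpl_alignment values are assumptions for illustration:
 *
 *	video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	video->ops = &ccdc_video_ops;
 *	video->isp = isp;
 *	video->capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
 *	video->bpl_alignment = 32;
 *
 *	ret = omap3isp_video_init(video, "CCDC");
 *	if (ret < 0)
 *		return ret;
 *
 *	// at entity registration time:
 *	ret = omap3isp_video_register(video, vdev);
 */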
1324
1325 void omap3isp_video_cleanup(struct isp_video *video)
1326 {
1327 media_entity_cleanup(&video->video.entity);
1328 mutex_destroy(&video->stream_lock);
1329 mutex_destroy(&video->mutex);
1330 }
1331
1332 int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
1333 {
1334 int ret;
1335
1336 video->video.v4l2_dev = vdev;
1337
1338 ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
1339 if (ret < 0)
1340 dev_err(video->isp->dev,
1341 "%s: could not register video device (%d)\n",
1342 __func__, ret);
1343
1344 return ret;
1345 }
1346
1347 void omap3isp_video_unregister(struct isp_video *video)
1348 {
1349 if (video_is_registered(&video->video))
1350 video_unregister_device(&video->video);
1351 }