/* drivers/media/video/s5p-tv/mixer_video.c */

/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */

#include "mixer.h"

#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>

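/*
 * Callback for driver_for_each_device(): fetch the v4l2_subdev stored as the
 * device's driver data and stop the iteration at the first match.
 */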
static int find_reg_callback(struct device *dev, void *p)
{
	struct v4l2_subdev **sd = p;

	*sd = dev_get_drvdata(dev);
	/* non-zero value stops iteration */
	return 1;
}

static struct v4l2_subdev *find_and_register_subdev(
	struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (!drv) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		sd = NULL;
	}

done:
	put_driver(drv);
	return sd;
}

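/*
 * Set up all V4L2-level resources of the mixer: register the v4l2_device,
 * create the vb2 dma-contig allocator context and register every available
 * output subdev listed in output_conf[].
 */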
int __devinit mxr_acquire_video(struct mxr_device *mdev,
	struct mxr_output_conf *output_conf, int output_count)
{
	struct device *dev = mdev->dev;
	struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
	int i;
	int ret = 0;
	struct v4l2_subdev *sd;

	strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
	/* prepare context for V4L2 device */
	ret = v4l2_device_register(dev, v4l2_dev);
	if (ret) {
		mxr_err(mdev, "could not register v4l2 device.\n");
		goto fail;
	}

	mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
	if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
		mxr_err(mdev, "could not acquire vb2 allocator\n");
		goto fail_v4l2_dev;
	}

	/* register outputs */
	mdev->output_cnt = 0;
	for (i = 0; i < output_count; ++i) {
		struct mxr_output_conf *conf = &output_conf[i];
		struct mxr_output *out;

		sd = find_and_register_subdev(mdev, conf->module_name);
		/* subdev not found or not registered, try the next output */
		if (sd == NULL)
			continue;
		out = kzalloc(sizeof *out, GFP_KERNEL);
		if (out == NULL) {
			mxr_err(mdev, "no memory for '%s'\n",
				conf->output_name);
			ret = -ENOMEM;
			/* registered subdevs are removed in fail_v4l2_dev */
			goto fail_output;
		}
		strlcpy(out->name, conf->output_name, sizeof(out->name));
		out->sd = sd;
		out->cookie = conf->cookie;
		mdev->output[mdev->output_cnt++] = out;
		mxr_info(mdev, "added output '%s' from module '%s'\n",
			conf->output_name, conf->module_name);
		/* check if the maximum number of outputs was reached */
		if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
			break;
	}

	if (mdev->output_cnt == 0) {
		mxr_err(mdev, "failed to register any output\n");
		ret = -ENODEV;
		/* skip fail_output because there is nothing to free */
		goto fail_vb2_allocator;
	}

	return 0;

fail_output:
	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);
	memset(mdev->output, 0, sizeof mdev->output);

fail_vb2_allocator:
	/* free the allocator context */
	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);

fail_v4l2_dev:
	/* NOTE: this automatically unregisters all subdevs */
	v4l2_device_unregister(v4l2_dev);

fail:
	return ret;
}

void __devexit mxr_release_video(struct mxr_device *mdev)
{
	int i;

	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);

	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
	v4l2_device_unregister(&mdev->v4l2_dev);
}

static int mxr_querycap(struct file *file, void *priv,
	struct v4l2_capability *cap)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
	strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
	sprintf(cap->bus_info, "%d", layer->idx);
	cap->version = KERNEL_VERSION(0, 1, 0);
	cap->capabilities = V4L2_CAP_STREAMING |
		V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	return 0;
}

/* Geometry handling */
static void mxr_layer_geo_fix(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	/* TODO: add some dirty flag to avoid unnecessary adjustments */
	mxr_get_mbus_fmt(mdev, &mbus_fmt);
	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.field = mbus_fmt.field;
	layer->ops.fix_geometry(layer);
}

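/* Reset the layer geometry to a full-screen source and destination derived
 * from the current media bus format of the output. */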
static void mxr_layer_default_geo(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	memset(&layer->geo, 0, sizeof layer->geo);

	mxr_get_mbus_fmt(mdev, &mbus_fmt);

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.width = layer->geo.dst.full_width;
	layer->geo.dst.height = layer->geo.dst.full_height;
	layer->geo.dst.field = mbus_fmt.field;

	layer->geo.src.full_width = mbus_fmt.width;
	layer->geo.src.full_height = mbus_fmt.height;
	layer->geo.src.width = layer->geo.src.full_width;
	layer->geo.src.height = layer->geo.src.full_height;

	layer->ops.fix_geometry(layer);
}

static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
		geo->src.full_width, geo->src.full_height);
	mxr_dbg(mdev, "src.size = (%u, %u)\n",
		geo->src.width, geo->src.height);
	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
		geo->src.x_offset, geo->src.y_offset);
	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
		geo->dst.full_width, geo->dst.full_height);
	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
		geo->dst.width, geo->dst.height);
	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
		geo->dst.x_offset, geo->dst.y_offset);
	mxr_dbg(mdev, "ratio = (%u, %u)\n",
		geo->x_ratio, geo->y_ratio);
}

static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc);
static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index);

static int mxr_enum_fmt(struct file *file, void *priv,
	struct v4l2_fmtdesc *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	const struct mxr_format *fmt;

	mxr_dbg(mdev, "%s\n", __func__);
	fmt = find_format_by_index(layer, f->index);
	if (fmt == NULL)
		return -EINVAL;

	strlcpy(f->description, fmt->name, sizeof(f->description));
	f->pixelformat = fmt->fourcc;

	return 0;
}

static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "unrecognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;
	geo->src.full_width = pix->width;
	geo->src.width = pix->width;
	geo->src.full_height = pix->height;
	geo->src.height = pix->height;
	/* ensure consistency of the geometry */
	mxr_layer_geo_fix(layer);
	mxr_dbg(mdev, "width=%u height=%u span=%u\n",
		geo->src.width, geo->src.height, geo->src.full_width);

	return 0;
}

static unsigned int divup(unsigned int dividend, unsigned int divisor)
{
	return (dividend + divisor - 1) / divisor;
}

unsigned long mxr_get_plane_size(const struct mxr_block *blk,
	unsigned int width, unsigned int height)
{
	unsigned int bl_width = divup(width, blk->width);
	unsigned int bl_height = divup(height, blk->height);

	return bl_width * bl_height * blk->size;
}

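/*
 * Fill V4L2 plane format information for a mixer format. Planes that share a
 * subframe (memory plane) have their sizes accumulated, and bytesperline is
 * the maximum over all contributing planes.
 */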
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		u16 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}

static int mxr_g_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	pix->width = layer->geo.src.full_width;
	pix->height = layer->geo.src.full_height;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = layer->fmt->fourcc;
	pix->colorspace = layer->fmt->colorspace;
	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);

	return 0;
}

static inline struct mxr_crop *choose_crop_by_type(struct mxr_geometry *geo,
	enum v4l2_buf_type type)
{
	switch (type) {
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		return &geo->dst;
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
		return &geo->src;
	default:
		return NULL;
	}
}

static int mxr_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_crop *crop;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	crop = choose_crop_by_type(&layer->geo, a->type);
	if (crop == NULL)
		return -EINVAL;
	mxr_layer_geo_fix(layer);
	a->c.left = crop->x_offset;
	a->c.top = crop->y_offset;
	a->c.width = crop->width;
	a->c.height = crop->height;
	return 0;
}

static int mxr_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_crop *crop;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	crop = choose_crop_by_type(&layer->geo, a->type);
	if (crop == NULL)
		return -EINVAL;
	crop->x_offset = a->c.left;
	crop->y_offset = a->c.top;
	crop->width = a->c.width;
	crop->height = a->c.height;
	mxr_layer_geo_fix(layer);
	return 0;
}

static int mxr_cropcap(struct file *file, void *fh, struct v4l2_cropcap *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_crop *crop;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	crop = choose_crop_by_type(&layer->geo, a->type);
	if (crop == NULL)
		return -EINVAL;
	mxr_layer_geo_fix(layer);
	a->bounds.left = 0;
	a->bounds.top = 0;
	a->bounds.width = crop->full_width;
	a->bounds.height = crop->full_height;
	a->defrect = a->bounds;
	/* setting pixel aspect to 1/1 */
	a->pixelaspect.numerator = 1;
	a->pixelaspect.denominator = 1;
	return 0;
}

static int mxr_enum_dv_presets(struct file *file, void *fh,
	struct v4l2_dv_enum_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* preset change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);

	mutex_unlock(&mdev->mutex);

	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}

static int mxr_g_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* standard change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);

	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	BUG_ON(out == NULL);
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported tv norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	if (sd->ops->video && sd->ops->video->s_dv_preset)
		a->capabilities |= V4L2_OUT_CAP_PRESETS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}

static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;

	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		ret = -EBUSY;
		goto done;
	}
	mdev->current_output = i;
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

done:
	mutex_unlock(&mdev->mutex);
	return ret;
}

static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	mutex_lock(&mdev->mutex);
	*p = mdev->current_output;
	mutex_unlock(&mdev->mutex);

	return 0;
}

static int mxr_reqbufs(struct file *file, void *priv,
	struct v4l2_requestbuffers *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_reqbufs(&layer->vb_queue, p);
}

static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_querybuf(&layer->vb_queue, p);
}

static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
	return vb2_qbuf(&layer->vb_queue, p);
}

static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}

static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamon(&layer->vb_queue, i);
}

static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamoff(&layer->vb_queue, i);
}

static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	/* Streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* Preset functions */
	.vidioc_enum_dv_presets = mxr_enum_dv_presets,
	.vidioc_s_dv_preset = mxr_s_dv_preset,
	.vidioc_g_dv_preset = mxr_g_dv_preset,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* Output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* Crop ioctls */
	.vidioc_g_crop = mxr_g_crop,
	.vidioc_s_crop = mxr_s_crop,
	.vidioc_cropcap = mxr_cropcap,
};

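/* The first open powers the device up, initializes the vb2 queue and applies
 * the default format and geometry of the layer. */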
static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	/* make sure device probing is finished */
	wait_for_device_probe();
	/* create a context for the file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		return ret;
	}

	/* nothing to do if the layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		return 0;

	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}

	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set the default format, the first one on the list */
	layer->fmt = layer->fmt_array[0];
	/* set up the default geometry */
	mxr_layer_default_geo(layer);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

	return ret;
}

static unsigned int
mxr_video_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_poll(&layer->vb_queue, file, wait);
}

static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_mmap(&layer->vb_queue, vma);
}

static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	return 0;
}

static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};

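/*
 * vb2 queue_setup callback: report the number of planes and the page-aligned
 * size of each plane for the currently configured format.
 */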
static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
	unsigned int *nplanes, unsigned int sizes[],
	void *alloc_ctxs[])
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	const struct mxr_format *fmt = layer->fmt;
	int i;
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_plane_pix_format planes[3];

	mxr_dbg(mdev, "%s\n", __func__);
	/* check whether a format was configured */
	if (fmt == NULL)
		return -EINVAL;
	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
		layer->geo.src.full_height);

	*nplanes = fmt->num_subframes;
	for (i = 0; i < fmt->num_subframes; ++i) {
		alloc_ctxs[i] = layer->mdev->alloc_ctx;
		sizes[i] = PAGE_ALIGN(planes[i].sizeimage);
		mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
	}

	if (*nbuffers == 0)
		*nbuffers = 1;

	return 0;
}

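/* vb2 buf_queue callback: add the buffer to the enqueue list; the first
 * buffer queued after start_streaming() enables the layer in hardware and
 * registers the mixer as a streamer. */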
static void buf_queue(struct vb2_buffer *vb)
{
	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	int must_start = 0;

	spin_lock_irqsave(&layer->enq_slock, flags);
	if (layer->state == MXR_LAYER_STREAMING_START) {
		layer->state = MXR_LAYER_STREAMING;
		must_start = 1;
	}
	list_add_tail(&buffer->list, &layer->enq_list);
	spin_unlock_irqrestore(&layer->enq_slock, flags);
	if (must_start) {
		layer->ops.stream_set(layer, MXR_ENABLE);
		mxr_streamer_get(mdev);
	}

	mxr_dbg(mdev, "queuing buffer\n");
}

static void wait_lock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_lock(&layer->mutex);
}

static void wait_unlock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_unlock(&layer->mutex);
}

static int start_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);
	/* block any changes in output configuration */
	mxr_output_get(mdev);

	/* update the layer's geometry */
	mxr_layer_geo_fix(layer);
	mxr_geometry_dump(mdev, &layer->geo);

	layer->ops.format_set(layer);
	/* the layer is enabled in hardware when the first buffer is queued */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING_START;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	return 0;
}

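/*
 * Watchdog used while stopping a stream: if the hardware does not return the
 * currently processed buffers in time, complete them with an error so that
 * stop_streaming() is not blocked forever.
 */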
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

	spin_lock_irqsave(&layer->enq_slock, flags);

	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}

static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* reset the enqueue list */
	layer->state = MXR_LAYER_STREAMING_FINISH;

	/* mark all queued buffers as done */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* give the hardware 1 second to complete the last buffers */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

	/* wait until all buffers reach the done state */
	vb2_wait_for_all_buffers(vq);

	/* stop the timer once all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);

	/* stop the hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* disable the layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);
	return 0;
}

static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};

/* FIXME: try to move this function into mxr_base_layer_create */
int mxr_base_layer_register(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	int ret;

	ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
	if (ret)
		mxr_err(mdev, "failed to register video device\n");
	else
		mxr_info(mdev, "registered layer %s as /dev/video%d\n",
			layer->vfd.name, layer->vfd.num);
	return ret;
}

void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}

void mxr_layer_release(struct mxr_layer *layer)
{
	if (layer->ops.release)
		layer->ops.release(layer);
}

void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}

static void mxr_vfd_release(struct video_device *vdev)
{
	printk(KERN_INFO "video device release\n");
}

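/* Allocate and initialize a generic mixer layer: video_device setup, locking,
 * buffer list and vb2 queue configuration; per-layer ops are provided by the
 * caller. */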
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops)
{
	struct mxr_layer *layer;

	layer = kzalloc(sizeof *layer, GFP_KERNEL);
	if (layer == NULL) {
		mxr_err(mdev, "not enough memory for layer.\n");
		goto fail;
	}

	layer->mdev = mdev;
	layer->idx = idx;
	layer->ops = *ops;

	spin_lock_init(&layer->enq_slock);
	INIT_LIST_HEAD(&layer->enq_list);
	mutex_init(&layer->mutex);

	layer->vfd = (struct video_device) {
		.minor = -1,
		.release = mxr_vfd_release,
		.fops = &mxr_fops,
		.ioctl_ops = &mxr_ioctl_ops,
	};
	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
	/* let framework control PRIORITY */
	set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);

	video_set_drvdata(&layer->vfd, layer);
	layer->vfd.lock = &layer->mutex;
	layer->vfd.v4l2_dev = &mdev->v4l2_dev;

	layer->vb_queue = (struct vb2_queue) {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.io_modes = VB2_MMAP | VB2_USERPTR,
		.drv_priv = layer,
		.buf_struct_size = sizeof(struct mxr_buffer),
		.ops = &mxr_video_qops,
		.mem_ops = &vb2_dma_contig_memops,
	};

	return layer;

fail:
	return NULL;
}

static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc)
{
	int i;

	for (i = 0; i < layer->fmt_array_size; ++i)
		if (layer->fmt_array[i]->fourcc == fourcc)
			return layer->fmt_array[i];
	return NULL;
}

static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index)
{
	if (index >= layer->fmt_array_size)
		return NULL;
	return layer->fmt_array[index];
}