[deliverable/linux.git] drivers/media/video/s5p-tv/mixer_video.c
/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */

#include "mixer.h"

#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>

static int find_reg_callback(struct device *dev, void *p)
{
	struct v4l2_subdev **sd = p;

	*sd = dev_get_drvdata(dev);
	/* non-zero value stops iteration */
	return 1;
}

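/*
 * Looks up the platform driver named module_name, takes the v4l2 subdev
 * stored in the drvdata of its first bound device and registers it with
 * mdev's v4l2_device.  Returns the subdev on success or NULL on failure.
 */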
static struct v4l2_subdev *find_and_register_subdev(
		struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (!drv) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		sd = NULL;
	}

done:
	put_driver(drv);
	return sd;
}

int __devinit mxr_acquire_video(struct mxr_device *mdev,
	struct mxr_output_conf *output_conf, int output_count)
{
	struct device *dev = mdev->dev;
	struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
	int i;
	int ret = 0;
	struct v4l2_subdev *sd;

	strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
	/* prepare context for V4L2 device */
	ret = v4l2_device_register(dev, v4l2_dev);
	if (ret) {
		mxr_err(mdev, "could not register v4l2 device.\n");
		goto fail;
	}

	mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
	if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
		mxr_err(mdev, "could not acquire vb2 allocator\n");
		ret = -ENOMEM;
		goto fail_v4l2_dev;
	}

	/* registering outputs */
	mdev->output_cnt = 0;
	for (i = 0; i < output_count; ++i) {
		struct mxr_output_conf *conf = &output_conf[i];
		struct mxr_output *out;

		sd = find_and_register_subdev(mdev, conf->module_name);
		/* trying to register next output */
		if (sd == NULL)
			continue;
		out = kzalloc(sizeof *out, GFP_KERNEL);
		if (out == NULL) {
			mxr_err(mdev, "no memory for '%s'\n",
				conf->output_name);
			ret = -ENOMEM;
			/* registered subdevs are removed in fail_v4l2_dev */
			goto fail_output;
		}
		strlcpy(out->name, conf->output_name, sizeof(out->name));
		out->sd = sd;
		out->cookie = conf->cookie;
		mdev->output[mdev->output_cnt++] = out;
		mxr_info(mdev, "added output '%s' from module '%s'\n",
			conf->output_name, conf->module_name);
		/* checking if maximal number of outputs is reached */
		if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
			break;
	}

	if (mdev->output_cnt == 0) {
		mxr_err(mdev, "failed to register any output\n");
		ret = -ENODEV;
		/* skipping fail_output because there is nothing to free */
		goto fail_vb2_allocator;
	}

	return 0;

fail_output:
	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);
	memset(mdev->output, 0, sizeof mdev->output);

fail_vb2_allocator:
	/* freeing allocator context */
	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);

fail_v4l2_dev:
	/* NOTE: automatically unregister all subdevs */
	v4l2_device_unregister(v4l2_dev);

fail:
	return ret;
}

void __devexit mxr_release_video(struct mxr_device *mdev)
{
	int i;

	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);

	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
	v4l2_device_unregister(&mdev->v4l2_dev);
}

static int mxr_querycap(struct file *file, void *priv,
	struct v4l2_capability *cap)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
	strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
	sprintf(cap->bus_info, "%d", layer->idx);
	cap->version = KERNEL_VERSION(0, 1, 0);
	cap->capabilities = V4L2_CAP_STREAMING |
		V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	return 0;
}

/* Geometry handling */
static void mxr_layer_geo_fix(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	/* TODO: add some dirty flag to avoid unnecessary adjustments */
	mxr_get_mbus_fmt(mdev, &mbus_fmt);
	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.field = mbus_fmt.field;
	layer->ops.fix_geometry(layer);
}

static void mxr_layer_default_geo(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	memset(&layer->geo, 0, sizeof layer->geo);

	mxr_get_mbus_fmt(mdev, &mbus_fmt);

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.width = layer->geo.dst.full_width;
	layer->geo.dst.height = layer->geo.dst.full_height;
	layer->geo.dst.field = mbus_fmt.field;

	layer->geo.src.full_width = mbus_fmt.width;
	layer->geo.src.full_height = mbus_fmt.height;
	layer->geo.src.width = layer->geo.src.full_width;
	layer->geo.src.height = layer->geo.src.full_height;

	layer->ops.fix_geometry(layer);
}

static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
		geo->src.full_width, geo->src.full_height);
	mxr_dbg(mdev, "src.size = (%u, %u)\n",
		geo->src.width, geo->src.height);
	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
		geo->src.x_offset, geo->src.y_offset);
	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
		geo->dst.full_width, geo->dst.full_height);
	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
		geo->dst.width, geo->dst.height);
	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
		geo->dst.x_offset, geo->dst.y_offset);
	mxr_dbg(mdev, "ratio = (%u, %u)\n",
		geo->x_ratio, geo->y_ratio);
}


static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc);
static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index);

static int mxr_enum_fmt(struct file *file, void *priv,
	struct v4l2_fmtdesc *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	const struct mxr_format *fmt;

	mxr_dbg(mdev, "%s\n", __func__);
	fmt = find_format_by_index(layer, f->index);
	if (fmt == NULL)
		return -EINVAL;

	strlcpy(f->description, fmt->name, sizeof(f->description));
	f->pixelformat = fmt->fourcc;

	return 0;
}

static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "not recognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;
	geo->src.full_width = pix->width;
	geo->src.width = pix->width;
	geo->src.full_height = pix->height;
	geo->src.height = pix->height;
	/* assure consistency of geometry */
	mxr_layer_geo_fix(layer);
	mxr_dbg(mdev, "width=%u height=%u span=%u\n",
		geo->src.width, geo->src.height, geo->src.full_width);

	return 0;
}

static unsigned int divup(unsigned int dividend, unsigned int divisor)
{
	return (dividend + divisor - 1) / divisor;
}

unsigned long mxr_get_plane_size(const struct mxr_block *blk,
	unsigned int width, unsigned int height)
{
	unsigned int bl_width = divup(width, blk->width);
	unsigned int bl_height = divup(height, blk->height);

	return bl_width * bl_height * blk->size;
}

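/*
 * Fills the v4l2 plane format array for the given mixer format and image
 * size.  Each hardware plane contributes its block-aligned size to the
 * subframe (memory plane) it maps to; bytesperline is the widest line among
 * the planes sharing a subframe.
 */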
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		u16 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}

static int mxr_g_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	pix->width = layer->geo.src.full_width;
	pix->height = layer->geo.src.full_height;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = layer->fmt->fourcc;
	pix->colorspace = layer->fmt->colorspace;
	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);

	return 0;
}

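/*
 * Maps a buffer type to the corresponding crop rectangle: the output types
 * crop the destination (display) window, the overlay type crops the source
 * image.  Returns NULL for unsupported buffer types.
 */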
static inline struct mxr_crop *choose_crop_by_type(struct mxr_geometry *geo,
	enum v4l2_buf_type type)
{
	switch (type) {
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		return &geo->dst;
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
		return &geo->src;
	default:
		return NULL;
	}
}

static int mxr_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_crop *crop;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	crop = choose_crop_by_type(&layer->geo, a->type);
	if (crop == NULL)
		return -EINVAL;
	mxr_layer_geo_fix(layer);
	a->c.left = crop->x_offset;
	a->c.top = crop->y_offset;
	a->c.width = crop->width;
	a->c.height = crop->height;
	return 0;
}

static int mxr_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_crop *crop;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	crop = choose_crop_by_type(&layer->geo, a->type);
	if (crop == NULL)
		return -EINVAL;
	crop->x_offset = a->c.left;
	crop->y_offset = a->c.top;
	crop->width = a->c.width;
	crop->height = a->c.height;
	mxr_layer_geo_fix(layer);
	return 0;
}

static int mxr_cropcap(struct file *file, void *fh, struct v4l2_cropcap *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_crop *crop;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	crop = choose_crop_by_type(&layer->geo, a->type);
	if (crop == NULL)
		return -EINVAL;
	mxr_layer_geo_fix(layer);
	a->bounds.left = 0;
	a->bounds.top = 0;
	a->bounds.width = crop->full_width;
	a->bounds.height = crop->full_height;
	a->defrect = a->bounds;
	/* setting pixel aspect to 1/1 */
	a->pixelaspect.numerator = 1;
	a->pixelaspect.denominator = 1;
	return 0;
}

static int mxr_enum_dv_presets(struct file *file, void *fh,
	struct v4l2_dv_enum_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* preset change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);

	mutex_unlock(&mdev->mutex);

	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}

static int mxr_g_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* standard change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);

	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	BUG_ON(out == NULL);
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported tv norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	if (sd->ops->video && sd->ops->video->s_dv_preset)
		a->capabilities |= V4L2_OUT_CAP_PRESETS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}

static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;

	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		ret = -EBUSY;
		goto done;
	}
	mdev->current_output = i;
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

done:
	mutex_unlock(&mdev->mutex);
	return ret;
}

static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	mutex_lock(&mdev->mutex);
	*p = mdev->current_output;
	mutex_unlock(&mdev->mutex);

	return 0;
}

static int mxr_reqbufs(struct file *file, void *priv,
	struct v4l2_requestbuffers *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_reqbufs(&layer->vb_queue, p);
}

static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_querybuf(&layer->vb_queue, p);
}

static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
	return vb2_qbuf(&layer->vb_queue, p);
}

static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}

static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamon(&layer->vb_queue, i);
}

static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamoff(&layer->vb_queue, i);
}

static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	/* Streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* Preset functions */
	.vidioc_enum_dv_presets = mxr_enum_dv_presets,
	.vidioc_s_dv_preset = mxr_s_dv_preset,
	.vidioc_g_dv_preset = mxr_g_dv_preset,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* Output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* Crop ioctls */
	.vidioc_g_crop = mxr_g_crop,
	.vidioc_s_crop = mxr_s_crop,
	.vidioc_cropcap = mxr_cropcap,
};

static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	/* assure device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		return ret;
	}

	/* leaving if layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		return 0;

	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}

	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

	return ret;
}

static unsigned int
mxr_video_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_poll(&layer->vb_queue, file, wait);
}

static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_mmap(&layer->vb_queue, vma);
}

static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	return 0;
}

static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};

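/*
 * vb2 queue_setup callback: computes the number of memory planes and the
 * page-aligned size of each one from the currently selected format and the
 * source geometry, and points every plane at the DMA-contig allocator.
 */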
static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
	unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
	void *alloc_ctxs[])
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	const struct mxr_format *fmt = layer->fmt;
	int i;
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_plane_pix_format planes[3];

	mxr_dbg(mdev, "%s\n", __func__);
	/* checking if format was configured */
	if (fmt == NULL)
		return -EINVAL;
	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
		layer->geo.src.full_height);

	*nplanes = fmt->num_subframes;
	for (i = 0; i < fmt->num_subframes; ++i) {
		alloc_ctxs[i] = layer->mdev->alloc_ctx;
		sizes[i] = PAGE_ALIGN(planes[i].sizeimage);
		mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
	}

	if (*nbuffers == 0)
		*nbuffers = 1;

	return 0;
}

static void buf_queue(struct vb2_buffer *vb)
{
	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	spin_lock_irqsave(&layer->enq_slock, flags);
	list_add_tail(&buffer->list, &layer->enq_list);
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	mxr_dbg(mdev, "queuing buffer\n");
}

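/*
 * vb2 drops the queue lock (wait_prepare) before sleeping for a buffer and
 * takes it back (wait_finish) afterwards, so a blocking DQBUF does not
 * starve other ioctls; hence wait_unlock/wait_lock below.
 */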
static void wait_lock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_lock(&layer->mutex);
}

static void wait_unlock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_unlock(&layer->mutex);
}

static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);

	if (count == 0) {
		mxr_dbg(mdev, "no output buffers queued\n");
		return -EINVAL;
	}

	/* block any changes in output configuration */
	mxr_output_get(mdev);

	/* update layers geometry */
	mxr_layer_geo_fix(layer);
	mxr_geometry_dump(mdev, &layer->geo);

	layer->ops.format_set(layer);
	/* enabling layer in hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	layer->ops.stream_set(layer, MXR_ENABLE);
	mxr_streamer_get(mdev);

	return 0;
}

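/*
 * Watchdog armed by stop_streaming() in case the hardware does not return
 * the in-flight buffers in time: any buffer still held in update_buf or
 * shadow_buf is completed with an error status.
 */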
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

	spin_lock_irqsave(&layer->enq_slock, flags);

	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}

static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* reset list */
	layer->state = MXR_LAYER_STREAMING_FINISH;

	/* set all queued buffers to done state */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* give 1 second to complete the last buffers */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

	/* wait until all buffers have reached the done state */
	vb2_wait_for_all_buffers(vq);

	/* stop timer if all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);

	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);
	return 0;
}

static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};

/* FIXME: try to move these functions into mxr_base_layer_create */
int mxr_base_layer_register(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	int ret;

	ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
	if (ret)
		mxr_err(mdev, "failed to register video device\n");
	else
		mxr_info(mdev, "registered layer %s as /dev/video%d\n",
			layer->vfd.name, layer->vfd.num);
	return ret;
}

void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}

void mxr_layer_release(struct mxr_layer *layer)
{
	if (layer->ops.release)
		layer->ops.release(layer);
}

void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}

static void mxr_vfd_release(struct video_device *vdev)
{
	printk(KERN_INFO "video device release\n");
}

struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops)
{
	struct mxr_layer *layer;

	layer = kzalloc(sizeof *layer, GFP_KERNEL);
	if (layer == NULL) {
		mxr_err(mdev, "not enough memory for layer.\n");
		goto fail;
	}

	layer->mdev = mdev;
	layer->idx = idx;
	layer->ops = *ops;

	spin_lock_init(&layer->enq_slock);
	INIT_LIST_HEAD(&layer->enq_list);
	mutex_init(&layer->mutex);

	layer->vfd = (struct video_device) {
		.minor = -1,
		.release = mxr_vfd_release,
		.fops = &mxr_fops,
		.ioctl_ops = &mxr_ioctl_ops,
	};
	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
	/* let framework control PRIORITY */
	set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);

	video_set_drvdata(&layer->vfd, layer);
	layer->vfd.lock = &layer->mutex;
	layer->vfd.v4l2_dev = &mdev->v4l2_dev;

	layer->vb_queue = (struct vb2_queue) {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.io_modes = VB2_MMAP | VB2_USERPTR,
		.drv_priv = layer,
		.buf_struct_size = sizeof(struct mxr_buffer),
		.ops = &mxr_video_qops,
		.mem_ops = &vb2_dma_contig_memops,
	};

	return layer;

fail:
	return NULL;
}

static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc)
{
	int i;

	for (i = 0; i < layer->fmt_array_size; ++i)
		if (layer->fmt_array[i]->fourcc == fourcc)
			return layer->fmt_array[i];
	return NULL;
}

static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index)
{
	if (index >= layer->fmt_array_size)
		return NULL;
	return layer->fmt_array[index];
}
