1 /*
2 * TI CAL camera interface driver
3 *
4 * Copyright (c) 2015 Texas Instruments Inc.
5 * Benoit Parrot, <bparrot@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation
10 */
11
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/ioctl.h>
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/delay.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/slab.h>
20 #include <linux/videodev2.h>
21 #include <linux/of_device.h>
22 #include <linux/of_graph.h>
23
24 #include <media/v4l2-of.h>
25 #include <media/v4l2-async.h>
26 #include <media/v4l2-common.h>
27 #include <media/v4l2-ctrls.h>
28 #include <media/v4l2-device.h>
29 #include <media/v4l2-event.h>
30 #include <media/v4l2-ioctl.h>
    32 #include <media/v4l2-fh.h>
35 #include <media/videobuf2-core.h>
36 #include <media/videobuf2-dma-contig.h>
37 #include "cal_regs.h"
38
39 #define CAL_MODULE_NAME "cal"
40
41 #define MAX_WIDTH 1920
42 #define MAX_HEIGHT 1200
43
44 #define CAL_VERSION "0.1.0"
45
46 MODULE_DESCRIPTION("TI CAL driver");
47 MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>");
48 MODULE_LICENSE("GPL v2");
49 MODULE_VERSION(CAL_VERSION);
50
51 static unsigned video_nr = -1;
52 module_param(video_nr, uint, 0644);
53 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
54
55 static unsigned debug;
56 module_param(debug, uint, 0644);
57 MODULE_PARM_DESC(debug, "activates debug info");
58
59 /* timeperframe: min/max and default */
60 static const struct v4l2_fract
61 tpf_default = {.numerator = 1001, .denominator = 30000};
62
63 #define cal_dbg(level, caldev, fmt, arg...) \
64 v4l2_dbg(level, debug, &caldev->v4l2_dev, fmt, ##arg)
65 #define cal_info(caldev, fmt, arg...) \
66 v4l2_info(&caldev->v4l2_dev, fmt, ##arg)
67 #define cal_err(caldev, fmt, arg...) \
68 v4l2_err(&caldev->v4l2_dev, fmt, ##arg)
69
70 #define ctx_dbg(level, ctx, fmt, arg...) \
71 v4l2_dbg(level, debug, &ctx->v4l2_dev, fmt, ##arg)
72 #define ctx_info(ctx, fmt, arg...) \
73 v4l2_info(&ctx->v4l2_dev, fmt, ##arg)
74 #define ctx_err(ctx, fmt, arg...) \
75 v4l2_err(&ctx->v4l2_dev, fmt, ##arg)
76
77 #define CAL_NUM_INPUT 1
78 #define CAL_NUM_CONTEXT 2
79
80 #define bytes_per_line(pixel, bpp) (ALIGN(pixel * bpp, 16))
81
82 #define reg_read(dev, offset) ioread32(dev->base + offset)
83 #define reg_write(dev, offset, val) iowrite32(val, dev->base + offset)
84
85 #define reg_read_field(dev, offset, mask) get_field(reg_read(dev, offset), \
86 mask)
87 #define reg_write_field(dev, offset, field, mask) { \
88 u32 val = reg_read(dev, offset); \
89 set_field(&val, field, mask); \
90 reg_write(dev, offset, val); }
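/*
 * Usage sketch for the field accessors (FOO_ENABLE_MASK is a made-up
 * mask used purely for illustration, not a field from cal_regs.h):
 *
 *	if (reg_read_field(dev, CAL_CTRL, FOO_ENABLE_MASK))
 *		reg_write_field(dev, CAL_CTRL, 0, FOO_ENABLE_MASK);
 *
 * Masks must be contiguous bit ranges, since get_field()/set_field()
 * below shift by __ffs(mask).
 */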
91
92 /* ------------------------------------------------------------------
93 * Basic structures
94 * ------------------------------------------------------------------
95 */
96
97 struct cal_fmt {
98 u32 fourcc;
99 u32 code;
100 u8 depth;
101 };
102
103 static struct cal_fmt cal_formats[] = {
104 {
105 .fourcc = V4L2_PIX_FMT_YUYV,
106 .code = MEDIA_BUS_FMT_YUYV8_2X8,
107 .depth = 16,
108 }, {
109 .fourcc = V4L2_PIX_FMT_UYVY,
110 .code = MEDIA_BUS_FMT_UYVY8_2X8,
111 .depth = 16,
112 }, {
113 .fourcc = V4L2_PIX_FMT_YVYU,
114 .code = MEDIA_BUS_FMT_YVYU8_2X8,
115 .depth = 16,
116 }, {
117 .fourcc = V4L2_PIX_FMT_VYUY,
118 .code = MEDIA_BUS_FMT_VYUY8_2X8,
119 .depth = 16,
120 }, {
121 .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
122 .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
123 .depth = 16,
124 }, {
125 .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
126 .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
127 .depth = 16,
128 }, {
129 .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
130 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
131 .depth = 16,
132 }, {
133 .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
134 .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
135 .depth = 16,
136 }, {
137 .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
138 .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
139 .depth = 24,
140 }, {
141 .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
142 .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
143 .depth = 24,
144 }, {
145 .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
146 .code = MEDIA_BUS_FMT_ARGB8888_1X32,
147 .depth = 32,
148 }, {
149 .fourcc = V4L2_PIX_FMT_SBGGR8,
150 .code = MEDIA_BUS_FMT_SBGGR8_1X8,
151 .depth = 8,
152 }, {
153 .fourcc = V4L2_PIX_FMT_SGBRG8,
154 .code = MEDIA_BUS_FMT_SGBRG8_1X8,
155 .depth = 8,
156 }, {
157 .fourcc = V4L2_PIX_FMT_SGRBG8,
158 .code = MEDIA_BUS_FMT_SGRBG8_1X8,
159 .depth = 8,
160 }, {
161 .fourcc = V4L2_PIX_FMT_SRGGB8,
162 .code = MEDIA_BUS_FMT_SRGGB8_1X8,
163 .depth = 8,
164 }, {
165 .fourcc = V4L2_PIX_FMT_SBGGR10,
166 .code = MEDIA_BUS_FMT_SBGGR10_1X10,
167 .depth = 16,
168 }, {
169 .fourcc = V4L2_PIX_FMT_SGBRG10,
170 .code = MEDIA_BUS_FMT_SGBRG10_1X10,
171 .depth = 16,
172 }, {
173 .fourcc = V4L2_PIX_FMT_SGRBG10,
174 .code = MEDIA_BUS_FMT_SGRBG10_1X10,
175 .depth = 16,
176 }, {
177 .fourcc = V4L2_PIX_FMT_SRGGB10,
178 .code = MEDIA_BUS_FMT_SRGGB10_1X10,
179 .depth = 16,
180 }, {
181 .fourcc = V4L2_PIX_FMT_SBGGR12,
182 .code = MEDIA_BUS_FMT_SBGGR12_1X12,
183 .depth = 16,
184 }, {
185 .fourcc = V4L2_PIX_FMT_SGBRG12,
186 .code = MEDIA_BUS_FMT_SGBRG12_1X12,
187 .depth = 16,
188 }, {
189 .fourcc = V4L2_PIX_FMT_SGRBG12,
190 .code = MEDIA_BUS_FMT_SGRBG12_1X12,
191 .depth = 16,
192 }, {
193 .fourcc = V4L2_PIX_FMT_SRGGB12,
194 .code = MEDIA_BUS_FMT_SRGGB12_1X12,
195 .depth = 16,
196 },
197 };
198
    199 /* Return a printable string for a Four-character-code (FOURCC) */
200 static char *fourcc_to_str(u32 fmt)
201 {
202 static char code[5];
203
204 code[0] = (unsigned char)(fmt & 0xff);
205 code[1] = (unsigned char)((fmt >> 8) & 0xff);
206 code[2] = (unsigned char)((fmt >> 16) & 0xff);
207 code[3] = (unsigned char)((fmt >> 24) & 0xff);
208 code[4] = '\0';
209
210 return code;
211 }
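/*
 * Example: fourcc_to_str(V4L2_PIX_FMT_YUYV) returns "YUYV".  The string
 * lives in a static buffer, so this helper is only intended for the
 * debug/info prints in this file and is not reentrant.
 */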
212
213 /* buffer for one video frame */
214 struct cal_buffer {
215 /* common v4l buffer stuff -- must be first */
216 struct vb2_v4l2_buffer vb;
217 struct list_head list;
218 const struct cal_fmt *fmt;
219 };
220
221 struct cal_dmaqueue {
222 struct list_head active;
223
224 /* Counters to control fps rate */
225 int frame;
226 int ini_jiffies;
227 };
228
229 struct cm_data {
230 void __iomem *base;
231 struct resource *res;
232
233 unsigned int camerrx_control;
234
235 struct platform_device *pdev;
236 };
237
238 struct cc_data {
239 void __iomem *base;
240 struct resource *res;
241
242 struct platform_device *pdev;
243 };
244
245 /*
246 * there is one cal_dev structure in the driver, it is shared by
247 * all instances.
248 */
249 struct cal_dev {
250 int irq;
251 void __iomem *base;
252 struct resource *res;
253 struct platform_device *pdev;
254 struct v4l2_device v4l2_dev;
255
256 /* Control Module handle */
257 struct cm_data *cm;
258 /* Camera Core Module handle */
259 struct cc_data *cc[CAL_NUM_CSI2_PORTS];
260
261 struct cal_ctx *ctx[CAL_NUM_CONTEXT];
262 };
263
264 /*
265 * There is one cal_ctx structure for each camera core context.
266 */
267 struct cal_ctx {
268 struct v4l2_device v4l2_dev;
269 struct v4l2_ctrl_handler ctrl_handler;
270 struct video_device vdev;
271 struct v4l2_async_notifier notifier;
272 struct v4l2_subdev *sensor;
273 struct v4l2_of_endpoint endpoint;
274
275 struct v4l2_async_subdev asd;
276 struct v4l2_async_subdev *asd_list[1];
277
278 struct v4l2_fh fh;
279 struct cal_dev *dev;
280 struct cc_data *cc;
281
282 /* v4l2_ioctl mutex */
283 struct mutex mutex;
284 /* v4l2 buffers lock */
285 spinlock_t slock;
286
287 /* Several counters */
288 unsigned long jiffies;
289
290 struct vb2_alloc_ctx *alloc_ctx;
291 struct cal_dmaqueue vidq;
292
293 /* Input Number */
294 int input;
295
296 /* video capture */
297 const struct cal_fmt *fmt;
298 /* Used to store current pixel format */
299 struct v4l2_format v_fmt;
300 /* Used to store current mbus frame format */
301 struct v4l2_mbus_framefmt m_fmt;
302
303 /* Current subdev enumerated format */
304 struct cal_fmt *active_fmt[ARRAY_SIZE(cal_formats)];
305 int num_active_fmt;
306
307 struct v4l2_fract timeperframe;
308 unsigned int sequence;
309 unsigned int external_rate;
310 struct vb2_queue vb_vidq;
311 unsigned int seq_count;
312 unsigned int csi2_port;
313 unsigned int virtual_channel;
314
    315 	/* Pointer to the current v4l2_buffer */
    316 	struct cal_buffer	*cur_frm;
    317 	/* Pointer to the next v4l2_buffer */
    318 	struct cal_buffer	*next_frm;
319 };
320
321 static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
322 u32 pixelformat)
323 {
324 const struct cal_fmt *fmt;
325 unsigned int k;
326
327 for (k = 0; k < ctx->num_active_fmt; k++) {
328 fmt = ctx->active_fmt[k];
329 if (fmt->fourcc == pixelformat)
330 return fmt;
331 }
332
333 return NULL;
334 }
335
336 static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx,
337 u32 code)
338 {
339 const struct cal_fmt *fmt;
340 unsigned int k;
341
342 for (k = 0; k < ctx->num_active_fmt; k++) {
343 fmt = ctx->active_fmt[k];
344 if (fmt->code == code)
345 return fmt;
346 }
347
348 return NULL;
349 }
350
351 static inline struct cal_ctx *notifier_to_ctx(struct v4l2_async_notifier *n)
352 {
353 return container_of(n, struct cal_ctx, notifier);
354 }
355
356 static inline int get_field(u32 value, u32 mask)
357 {
358 return (value & mask) >> __ffs(mask);
359 }
360
361 static inline void set_field(u32 *valp, u32 field, u32 mask)
362 {
363 u32 val = *valp;
364
365 val &= ~mask;
366 val |= (field << __ffs(mask)) & mask;
367 *valp = val;
368 }
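/*
 * Worked example (values picked only for illustration): with mask 0x00f0,
 * __ffs(mask) is 4, so set_field(&val, 3, 0x00f0) clears bits 7:4 of *valp
 * and writes 3 there, while get_field(0x0130, 0x00f0) returns 3.
 */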
369
370 /*
371 * Control Module block access
372 */
373 static struct cm_data *cm_create(struct cal_dev *dev)
374 {
375 struct platform_device *pdev = dev->pdev;
376 struct cm_data *cm;
377
378 cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL);
379 if (!cm)
380 return ERR_PTR(-ENOMEM);
381
382 cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
383 "camerrx_control");
384 cm->base = devm_ioremap_resource(&pdev->dev, cm->res);
385 if (IS_ERR(cm->base)) {
386 cal_err(dev, "failed to ioremap\n");
387 return ERR_CAST(cm->base);
388 }
389
390 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
391 cm->res->name, &cm->res->start, &cm->res->end);
392
393 return cm;
394 }
395
396 static void camerarx_phy_enable(struct cal_ctx *ctx)
397 {
398 u32 val;
399
400 if (!ctx->dev->cm->base) {
401 ctx_err(ctx, "cm not mapped\n");
402 return;
403 }
404
405 val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
406 if (ctx->csi2_port == 1) {
407 set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
408 set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK);
409 /* enable all lanes by default */
410 set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK);
411 set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK);
412 } else if (ctx->csi2_port == 2) {
413 set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
414 set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK);
415 /* enable all lanes by default */
416 set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK);
417 set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK);
418 }
419 reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
420 }
421
422 static void camerarx_phy_disable(struct cal_ctx *ctx)
423 {
424 u32 val;
425
426 if (!ctx->dev->cm->base) {
427 ctx_err(ctx, "cm not mapped\n");
428 return;
429 }
430
431 val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
432 if (ctx->csi2_port == 1)
433 set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
434 else if (ctx->csi2_port == 2)
435 set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
436 reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
437 }
438
439 /*
440 * Camera Instance access block
441 */
442 static struct cc_data *cc_create(struct cal_dev *dev, unsigned int core)
443 {
444 struct platform_device *pdev = dev->pdev;
445 struct cc_data *cc;
446
447 cc = devm_kzalloc(&pdev->dev, sizeof(*cc), GFP_KERNEL);
448 if (!cc)
449 return ERR_PTR(-ENOMEM);
450
451 cc->res = platform_get_resource_byname(pdev,
452 IORESOURCE_MEM,
453 (core == 0) ?
454 "cal_rx_core0" :
455 "cal_rx_core1");
456 cc->base = devm_ioremap_resource(&pdev->dev, cc->res);
457 if (IS_ERR(cc->base)) {
458 cal_err(dev, "failed to ioremap\n");
459 return ERR_CAST(cc->base);
460 }
461
462 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
463 cc->res->name, &cc->res->start, &cc->res->end);
464
465 return cc;
466 }
467
468 /*
469 * Get Revision and HW info
470 */
471 static void cal_get_hwinfo(struct cal_dev *dev)
472 {
473 u32 revision = 0;
474 u32 hwinfo = 0;
475
476 revision = reg_read(dev, CAL_HL_REVISION);
477 cal_dbg(3, dev, "CAL_HL_REVISION = 0x%08x (expecting 0x40000200)\n",
478 revision);
479
480 hwinfo = reg_read(dev, CAL_HL_HWINFO);
481 cal_dbg(3, dev, "CAL_HL_HWINFO = 0x%08x (expecting 0xA3C90469)\n",
482 hwinfo);
483 }
484
485 static inline int cal_runtime_get(struct cal_dev *dev)
486 {
487 int r;
488
489 r = pm_runtime_get_sync(&dev->pdev->dev);
490
491 return r;
492 }
493
494 static inline void cal_runtime_put(struct cal_dev *dev)
495 {
496 pm_runtime_put_sync(&dev->pdev->dev);
497 }
498
499 static void cal_quickdump_regs(struct cal_dev *dev)
500 {
501 cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start);
502 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
503 (__force const void *)dev->base,
504 resource_size(dev->res), false);
505
506 if (dev->ctx[0]) {
507 cal_info(dev, "CSI2 Core 0 Registers @ %pa:\n",
508 &dev->ctx[0]->cc->res->start);
509 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
510 (__force const void *)dev->ctx[0]->cc->base,
511 resource_size(dev->ctx[0]->cc->res),
512 false);
513 }
514
515 if (dev->ctx[1]) {
516 cal_info(dev, "CSI2 Core 1 Registers @ %pa:\n",
517 &dev->ctx[1]->cc->res->start);
518 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
519 (__force const void *)dev->ctx[1]->cc->base,
520 resource_size(dev->ctx[1]->cc->res),
521 false);
522 }
523
524 cal_info(dev, "CAMERRX_Control Registers @ %pa:\n",
525 &dev->cm->res->start);
526 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
527 (__force const void *)dev->cm->base,
528 resource_size(dev->cm->res), false);
529 }
530
531 /*
532 * Enable the expected IRQ sources
533 */
534 static void enable_irqs(struct cal_ctx *ctx)
535 {
536 /* Enable IRQ_WDMA_END 0/1 */
537 reg_write_field(ctx->dev,
538 CAL_HL_IRQENABLE_SET(2),
539 CAL_HL_IRQ_ENABLE,
540 CAL_HL_IRQ_MASK(ctx->csi2_port));
541 /* Enable IRQ_WDMA_START 0/1 */
542 reg_write_field(ctx->dev,
543 CAL_HL_IRQENABLE_SET(3),
544 CAL_HL_IRQ_ENABLE,
545 CAL_HL_IRQ_MASK(ctx->csi2_port));
546 /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
547 reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0xFF000000);
548 }
549
550 static void disable_irqs(struct cal_ctx *ctx)
551 {
552 /* Disable IRQ_WDMA_END 0/1 */
553 reg_write_field(ctx->dev,
554 CAL_HL_IRQENABLE_CLR(2),
555 CAL_HL_IRQ_CLEAR,
556 CAL_HL_IRQ_MASK(ctx->csi2_port));
557 /* Disable IRQ_WDMA_START 0/1 */
558 reg_write_field(ctx->dev,
559 CAL_HL_IRQENABLE_CLR(3),
560 CAL_HL_IRQ_CLEAR,
561 CAL_HL_IRQ_MASK(ctx->csi2_port));
562 /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
563 reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
564 }
565
566 static void csi2_init(struct cal_ctx *ctx)
567 {
568 int i;
569 u32 val;
570
571 val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
572 set_field(&val, CAL_GEN_ENABLE,
573 CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
574 set_field(&val, CAL_GEN_ENABLE,
575 CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
576 set_field(&val, CAL_GEN_DISABLE,
577 CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
578 set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
579 reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
580 ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port,
581 reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
582
583 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
584 set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
585 CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
586 set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
587 CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
588 reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
589 for (i = 0; i < 10; i++) {
590 if (reg_read_field(ctx->dev,
591 CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
592 CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
593 CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON)
594 break;
595 usleep_range(1000, 1100);
596 }
597 ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port,
598 reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
599
600 val = reg_read(ctx->dev, CAL_CTRL);
601 set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
602 set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
603 set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
604 CAL_CTRL_POSTED_WRITES_MASK);
605 set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
606 set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
607 reg_write(ctx->dev, CAL_CTRL, val);
608 ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
609 }
610
611 static void csi2_lane_config(struct cal_ctx *ctx)
612 {
613 u32 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
614 u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
615 u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
616 struct v4l2_of_bus_mipi_csi2 *mipi_csi2 = &ctx->endpoint.bus.mipi_csi2;
617 int lane;
618
619 set_field(&val, mipi_csi2->clock_lane + 1, lane_mask);
620 set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask);
621 for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) {
622 /*
    623 		 * The lanes are one nibble apart, starting with the clock
    624 		 * lane followed by the data lanes, so shift the masks by 4.
625 */
626 lane_mask <<= 4;
627 polarity_mask <<= 4;
628 set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask);
629 set_field(&val, mipi_csi2->lane_polarities[lane + 1],
630 polarity_mask);
631 }
632
633 reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
634 ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n",
635 ctx->csi2_port, val);
636 }
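/*
 * Worked example (illustrative endpoint values): with clock_lane = 0 and
 * data_lanes = <1 2>, the clock position field is programmed with 1 and
 * the two data lane position fields (each one nibble higher) with 2 and 3;
 * the polarity bits stay 0 unless lane-polarities is set in the endpoint.
 */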
637
638 static void csi2_ppi_enable(struct cal_ctx *ctx)
639 {
640 reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
641 CAL_GEN_ENABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
642 }
643
644 static void csi2_ppi_disable(struct cal_ctx *ctx)
645 {
646 reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
647 CAL_GEN_DISABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
648 }
649
650 static void csi2_ctx_config(struct cal_ctx *ctx)
651 {
652 u32 val;
653
654 val = reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port));
655 set_field(&val, ctx->csi2_port, CAL_CSI2_CTX_CPORT_MASK);
656 /*
657 * DT type: MIPI CSI-2 Specs
658 * 0x1: All - DT filter is disabled
659 * 0x24: RGB888 1 pixel = 3 bytes
660 * 0x2B: RAW10 4 pixels = 5 bytes
661 * 0x2A: RAW8 1 pixel = 1 byte
662 * 0x1E: YUV422 2 pixels = 4 bytes
663 */
664 set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK);
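	/*
	 * For example, a sensor streaming only RAW10 could be filtered
	 * with 0x2B here, but this driver relies on the format negotiated
	 * with the subdev and leaves the DT filter disabled.
	 */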
    665 	/* Virtual Channel from the CSI2 sensor, usually 0 */
666 set_field(&val, ctx->virtual_channel, CAL_CSI2_CTX_VC_MASK);
667 /* NUM_LINES_PER_FRAME => 0 means auto detect */
668 set_field(&val, 0, CAL_CSI2_CTX_LINES_MASK);
669 set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
670 set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
671 CAL_CSI2_CTX_PACK_MODE_MASK);
672 reg_write(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port), val);
673 ctx_dbg(3, ctx, "CAL_CSI2_CTX0(%d) = 0x%08x\n", ctx->csi2_port,
674 reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port)));
675 }
676
677 static void pix_proc_config(struct cal_ctx *ctx)
678 {
679 u32 val;
680
681 val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port));
682 set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK);
683 set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
684 set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
685 set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK);
686 set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
687 set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
688 reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
689 ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->csi2_port,
690 reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port)));
691 }
692
693 static void cal_wr_dma_config(struct cal_ctx *ctx,
694 unsigned int width)
695 {
696 u32 val;
697
698 val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
699 set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
700 set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
701 CAL_WR_DMA_CTRL_DTAG_MASK);
702 set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
703 CAL_WR_DMA_CTRL_MODE_MASK);
704 set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
705 CAL_WR_DMA_CTRL_PATTERN_MASK);
706 set_field(&val, CAL_GEN_ENABLE, CAL_WR_DMA_CTRL_STALL_RD_MASK);
707 reg_write(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port), val);
708 ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->csi2_port,
709 reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)));
710
711 /*
    712 	 * The offset is programmed as width/16; this value was found
    713 	 * empirically, as zero does not work correctly.
714 */
715 reg_write_field(ctx->dev,
716 CAL_WR_DMA_OFST(ctx->csi2_port),
717 (width / 16),
718 CAL_WR_DMA_OFST_MASK);
719 ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->csi2_port,
720 reg_read(ctx->dev, CAL_WR_DMA_OFST(ctx->csi2_port)));
721
722 val = reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port));
723 /* 64 bit word means no skipping */
724 set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
725 /*
    726 	 * (width * 8) / 64 is the size of an entire line in 64-bit
    727 	 * words; a value of 0 would instead mean 'all data until the
    728 	 * end of line is detected automatically'.
729 */
730 set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK);
731 reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val);
732 ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port,
733 reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)));
734 }
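/*
 * Worked example (illustrative): for a 1280x720 YUYV capture the caller
 * passes bytesperline = 2560 as width, so the offset field above is
 * programmed with 2560 / 16 = 160 and the XSIZE field with 2560 / 8 = 320
 * 64-bit words per line.
 */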
735
736 static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
737 {
738 reg_write(ctx->dev, CAL_WR_DMA_ADDR(ctx->csi2_port), dmaaddr);
739 }
740
741 /*
742 * TCLK values are OK at their reset values
743 */
744 #define TCLK_TERM 0
745 #define TCLK_MISS 1
746 #define TCLK_SETTLE 14
747 #define THS_SETTLE 15
748
749 static void csi2_phy_config(struct cal_ctx *ctx)
750 {
751 unsigned int reg0, reg1;
752 unsigned int ths_term, ths_settle;
753 unsigned int ddrclkperiod_us;
754
755 /*
756 * THS_TERM: Programmed value = floor(20 ns/DDRClk period) - 2.
757 */
758 ddrclkperiod_us = ctx->external_rate / 2000000;
759 ddrclkperiod_us = 1000000 / ddrclkperiod_us;
760 ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us);
761
762 ths_term = 20000 / ddrclkperiod_us;
763 ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;
764 ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
765
766 /*
767 * THS_SETTLE: Programmed value = floor(176.3 ns/CtrlClk period) - 1.
    768 	 * Since CtrlClk is fixed at 96 MHz, we get
769 * ths_settle = floor(176.3 / 10.416) - 1 = 15
770 * If we ever switch to a dynamic clock then this code might be useful
771 *
772 * unsigned int ctrlclkperiod_us;
773 * ctrlclkperiod_us = 96000000 / 1000000;
774 * ctrlclkperiod_us = 1000000 / ctrlclkperiod_us;
775 * ctx_dbg(1, ctx, "ctrlclkperiod_us: %d\n", ctrlclkperiod_us);
    776 	 *
777 * ths_settle = 176300 / ctrlclkperiod_us;
778 * ths_settle = (ths_settle > 1) ? ths_settle - 1 : ths_settle;
779 */
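	/*
	 * Worked example: with the driver's default external_rate of
	 * 192000000 (see cal_complete_ctx()), ddrclkperiod_us above ends up
	 * as 1000000 / 96 = 10416, which is really the DDR clock period in
	 * picoseconds despite the _us name, so ths_term = 20000 / 10416 = 1
	 * and, being below 2, it is not reduced further; ths_settle stays
	 * at the fixed value of 15.
	 */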
780
781 ths_settle = THS_SETTLE;
782 ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
783
784 reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
785 set_field(&reg0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
786 CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK);
787 set_field(&reg0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK);
788 set_field(&reg0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK);
789
790 ctx_dbg(1, ctx, "CSI2_%d_REG0 = 0x%08x\n", (ctx->csi2_port - 1), reg0);
791 reg_write(ctx->cc, CAL_CSI2_PHY_REG0, reg0);
792
793 reg1 = reg_read(ctx->cc, CAL_CSI2_PHY_REG1);
794 set_field(&reg1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK);
795 set_field(&reg1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK);
796 set_field(&reg1, TCLK_MISS, CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK);
797 set_field(&reg1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK);
798
799 ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x\n", (ctx->csi2_port - 1), reg1);
800 reg_write(ctx->cc, CAL_CSI2_PHY_REG1, reg1);
801 }
802
803 static int cal_get_external_info(struct cal_ctx *ctx)
804 {
805 struct v4l2_ctrl *ctrl;
806
807 if (!ctx->sensor)
808 return -ENODEV;
809
810 ctrl = v4l2_ctrl_find(ctx->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
811 if (!ctrl) {
812 ctx_err(ctx, "no pixel rate control in subdev: %s\n",
813 ctx->sensor->name);
814 return -EPIPE;
815 }
816
817 ctx->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
818 ctx_dbg(3, ctx, "sensor Pixel Rate: %d\n", ctx->external_rate);
819
820 return 0;
821 }
822
823 static inline void cal_schedule_next_buffer(struct cal_ctx *ctx)
824 {
825 struct cal_dmaqueue *dma_q = &ctx->vidq;
826 struct cal_buffer *buf;
827 unsigned long addr;
828
829 buf = list_entry(dma_q->active.next, struct cal_buffer, list);
830 ctx->next_frm = buf;
831 list_del(&buf->list);
832
833 addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
834 cal_wr_dma_addr(ctx, addr);
835 }
836
837 static inline void cal_process_buffer_complete(struct cal_ctx *ctx)
838 {
839 ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
840 ctx->cur_frm->vb.field = ctx->m_fmt.field;
841 ctx->cur_frm->vb.sequence = ctx->sequence++;
842
843 vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
844 ctx->cur_frm = ctx->next_frm;
845 }
846
847 #define isvcirqset(irq, vc, ff) (irq & \
848 (CAL_CSI2_VC_IRQENABLE_ ##ff ##_IRQ_##vc ##_MASK))
849
850 #define isportirqset(irq, port) (irq & CAL_HL_IRQ_MASK(port))
851
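/*
 * Interrupt handling uses a simple two-pointer scheme: IRQ_WDMA_END
 * (IRQSTATUS(2)) means the DMA for cur_frm has finished, so that buffer
 * is returned to vb2 and next_frm becomes the current one, while
 * IRQ_WDMA_START (IRQSTATUS(3)) marks the start of a new frame and is the
 * point where the address of the next queued buffer is programmed into
 * the write DMA.
 */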
852 static irqreturn_t cal_irq(int irq_cal, void *data)
853 {
854 struct cal_dev *dev = (struct cal_dev *)data;
855 struct cal_ctx *ctx;
856 struct cal_dmaqueue *dma_q;
857 u32 irqst2, irqst3;
858
859 /* Check which DMA just finished */
860 irqst2 = reg_read(dev, CAL_HL_IRQSTATUS(2));
861 if (irqst2) {
862 /* Clear Interrupt status */
863 reg_write(dev, CAL_HL_IRQSTATUS(2), irqst2);
864
    865 		/* Need to check both ports */
866 if (isportirqset(irqst2, 1)) {
867 ctx = dev->ctx[0];
868
869 if (ctx->cur_frm != ctx->next_frm)
870 cal_process_buffer_complete(ctx);
871 }
872
873 if (isportirqset(irqst2, 2)) {
874 ctx = dev->ctx[1];
875
876 if (ctx->cur_frm != ctx->next_frm)
877 cal_process_buffer_complete(ctx);
878 }
879 }
880
881 /* Check which DMA just started */
882 irqst3 = reg_read(dev, CAL_HL_IRQSTATUS(3));
883 if (irqst3) {
884 /* Clear Interrupt status */
885 reg_write(dev, CAL_HL_IRQSTATUS(3), irqst3);
886
    887 		/* Need to check both ports */
888 if (isportirqset(irqst3, 1)) {
889 ctx = dev->ctx[0];
890 dma_q = &ctx->vidq;
891
892 spin_lock(&ctx->slock);
893 if (!list_empty(&dma_q->active) &&
894 ctx->cur_frm == ctx->next_frm)
895 cal_schedule_next_buffer(ctx);
896 spin_unlock(&ctx->slock);
897 }
898
899 if (isportirqset(irqst3, 2)) {
900 ctx = dev->ctx[1];
901 dma_q = &ctx->vidq;
902
903 spin_lock(&ctx->slock);
904 if (!list_empty(&dma_q->active) &&
905 ctx->cur_frm == ctx->next_frm)
906 cal_schedule_next_buffer(ctx);
907 spin_unlock(&ctx->slock);
908 }
909 }
910
911 return IRQ_HANDLED;
912 }
913
914 /*
915 * video ioctls
916 */
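/*
 * Rough userspace view of this interface (illustrative snippet, not part
 * of the driver): a capture application typically starts with
 *
 *	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
 *
 *	ioctl(fd, VIDIOC_QUERYCAP, &cap);
 *	fmt.fmt.pix.width = 1280;		(example values only)
 *	fmt.fmt.pix.height = 720;
 *	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
 *	ioctl(fd, VIDIOC_S_FMT, &fmt);
 *
 * which ends up in cal_querycap() and cal_s_fmt_vid_cap() below.
 */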
917 static int cal_querycap(struct file *file, void *priv,
918 struct v4l2_capability *cap)
919 {
920 struct cal_ctx *ctx = video_drvdata(file);
921
922 strlcpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
923 strlcpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));
924
925 snprintf(cap->bus_info, sizeof(cap->bus_info),
926 "platform:%s", ctx->v4l2_dev.name);
927 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
928 V4L2_CAP_READWRITE;
929 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
930 return 0;
931 }
932
933 static int cal_enum_fmt_vid_cap(struct file *file, void *priv,
934 struct v4l2_fmtdesc *f)
935 {
936 struct cal_ctx *ctx = video_drvdata(file);
937 const struct cal_fmt *fmt = NULL;
938
939 if (f->index >= ctx->num_active_fmt)
940 return -EINVAL;
941
942 fmt = ctx->active_fmt[f->index];
943
944 f->pixelformat = fmt->fourcc;
945 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
946 return 0;
947 }
948
949 static int __subdev_get_format(struct cal_ctx *ctx,
950 struct v4l2_mbus_framefmt *fmt)
951 {
952 struct v4l2_subdev_format sd_fmt;
953 struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
954 int ret;
955
956 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
957 sd_fmt.pad = 0;
958
959 ret = v4l2_subdev_call(ctx->sensor, pad, get_fmt, NULL, &sd_fmt);
960 if (ret)
961 return ret;
962
963 *fmt = *mbus_fmt;
964
965 ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
966 fmt->width, fmt->height, fmt->code);
967
968 return 0;
969 }
970
971 static int __subdev_set_format(struct cal_ctx *ctx,
972 struct v4l2_mbus_framefmt *fmt)
973 {
974 struct v4l2_subdev_format sd_fmt;
975 struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
976 int ret;
977
978 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
979 sd_fmt.pad = 0;
980 *mbus_fmt = *fmt;
981
982 ret = v4l2_subdev_call(ctx->sensor, pad, set_fmt, NULL, &sd_fmt);
983 if (ret)
984 return ret;
985
986 ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
987 fmt->width, fmt->height, fmt->code);
988
989 return 0;
990 }
991
992 static int cal_calc_format_size(struct cal_ctx *ctx,
993 const struct cal_fmt *fmt,
994 struct v4l2_format *f)
995 {
996 if (!fmt) {
997 ctx_dbg(3, ctx, "No cal_fmt provided!\n");
998 return -EINVAL;
999 }
1000
1001 v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
1002 &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
1003 f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width,
1004 fmt->depth >> 3);
1005 f->fmt.pix.sizeimage = f->fmt.pix.height *
1006 f->fmt.pix.bytesperline;
1007
1008 ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
1009 __func__, fourcc_to_str(f->fmt.pix.pixelformat),
1010 f->fmt.pix.width, f->fmt.pix.height,
1011 f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
1012
1013 return 0;
1014 }
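/*
 * Worked example (illustrative): a 1280x720 request with V4L2_PIX_FMT_YUYV
 * (16 bits per pixel) passes the bounding/alignment above unchanged and
 * yields bytesperline = ALIGN(1280 * 2, 16) = 2560 and
 * sizeimage = 720 * 2560 = 1843200 bytes.
 */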
1015
1016 static int cal_g_fmt_vid_cap(struct file *file, void *priv,
1017 struct v4l2_format *f)
1018 {
1019 struct cal_ctx *ctx = video_drvdata(file);
1020
1021 *f = ctx->v_fmt;
1022
1023 return 0;
1024 }
1025
1026 static int cal_try_fmt_vid_cap(struct file *file, void *priv,
1027 struct v4l2_format *f)
1028 {
1029 struct cal_ctx *ctx = video_drvdata(file);
1030 const struct cal_fmt *fmt;
1031 struct v4l2_subdev_frame_size_enum fse;
1032 int ret, found;
1033
1034 fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
1035 if (!fmt) {
1036 ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
1037 f->fmt.pix.pixelformat);
1038
1039 /* Just get the first one enumerated */
1040 fmt = ctx->active_fmt[0];
1041 f->fmt.pix.pixelformat = fmt->fourcc;
1042 }
1043
1044 f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
1045
1046 /* check for/find a valid width/height */
1047 ret = 0;
1048 found = false;
1049 fse.pad = 0;
1050 fse.code = fmt->code;
1051 fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1052 for (fse.index = 0; ; fse.index++) {
1053 ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
1054 NULL, &fse);
1055 if (ret)
1056 break;
1057
1058 if ((f->fmt.pix.width == fse.max_width) &&
1059 (f->fmt.pix.height == fse.max_height)) {
1060 found = true;
1061 break;
1062 } else if ((f->fmt.pix.width >= fse.min_width) &&
1063 (f->fmt.pix.width <= fse.max_width) &&
1064 (f->fmt.pix.height >= fse.min_height) &&
1065 (f->fmt.pix.height <= fse.max_height)) {
1066 found = true;
1067 break;
1068 }
1069 }
1070
1071 if (!found) {
1072 /* use existing values as default */
1073 f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
1074 f->fmt.pix.height = ctx->v_fmt.fmt.pix.height;
1075 }
1076
1077 /*
1078 * Use current colorspace for now, it will get
1079 * updated properly during s_fmt
1080 */
1081 f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
1082 return cal_calc_format_size(ctx, fmt, f);
1083 }
1084
1085 static int cal_s_fmt_vid_cap(struct file *file, void *priv,
1086 struct v4l2_format *f)
1087 {
1088 struct cal_ctx *ctx = video_drvdata(file);
1089 struct vb2_queue *q = &ctx->vb_vidq;
1090 const struct cal_fmt *fmt;
1091 struct v4l2_mbus_framefmt mbus_fmt;
1092 int ret;
1093
1094 if (vb2_is_busy(q)) {
1095 ctx_dbg(3, ctx, "%s device busy\n", __func__);
1096 return -EBUSY;
1097 }
1098
1099 ret = cal_try_fmt_vid_cap(file, priv, f);
1100 if (ret < 0)
1101 return ret;
1102
1103 fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
1104
1105 v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code);
1106
1107 ret = __subdev_set_format(ctx, &mbus_fmt);
1108 if (ret)
1109 return ret;
1110
1111 /* Just double check nothing has gone wrong */
1112 if (mbus_fmt.code != fmt->code) {
1113 ctx_dbg(3, ctx,
1114 "%s subdev changed format on us, this should not happen\n",
1115 __func__);
1116 return -EINVAL;
1117 }
1118
1119 v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
1120 ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1121 ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
1122 cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
1123 ctx->fmt = fmt;
1124 ctx->m_fmt = mbus_fmt;
1125 *f = ctx->v_fmt;
1126
1127 return 0;
1128 }
1129
1130 static int cal_enum_framesizes(struct file *file, void *fh,
1131 struct v4l2_frmsizeenum *fsize)
1132 {
1133 struct cal_ctx *ctx = video_drvdata(file);
1134 const struct cal_fmt *fmt;
1135 struct v4l2_subdev_frame_size_enum fse;
1136 int ret;
1137
1138 /* check for valid format */
1139 fmt = find_format_by_pix(ctx, fsize->pixel_format);
1140 if (!fmt) {
1141 ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
1142 fsize->pixel_format);
1143 return -EINVAL;
1144 }
1145
1146 fse.index = fsize->index;
1147 fse.pad = 0;
1148 fse.code = fmt->code;
1149
1150 ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
1151 if (ret)
1152 return ret;
1153
1154 ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
1155 __func__, fse.index, fse.code, fse.min_width, fse.max_width,
1156 fse.min_height, fse.max_height);
1157
1158 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1159 fsize->discrete.width = fse.max_width;
1160 fsize->discrete.height = fse.max_height;
1161
1162 return 0;
1163 }
1164
1165 static int cal_enum_input(struct file *file, void *priv,
1166 struct v4l2_input *inp)
1167 {
1168 if (inp->index >= CAL_NUM_INPUT)
1169 return -EINVAL;
1170
1171 inp->type = V4L2_INPUT_TYPE_CAMERA;
1172 sprintf(inp->name, "Camera %u", inp->index);
1173 return 0;
1174 }
1175
1176 static int cal_g_input(struct file *file, void *priv, unsigned int *i)
1177 {
1178 struct cal_ctx *ctx = video_drvdata(file);
1179
1180 *i = ctx->input;
1181 return 0;
1182 }
1183
1184 static int cal_s_input(struct file *file, void *priv, unsigned int i)
1185 {
1186 struct cal_ctx *ctx = video_drvdata(file);
1187
1188 if (i >= CAL_NUM_INPUT)
1189 return -EINVAL;
1190
1191 ctx->input = i;
1192 return 0;
1193 }
1194
1195 /* timeperframe is arbitrary and continuous */
1196 static int cal_enum_frameintervals(struct file *file, void *priv,
1197 struct v4l2_frmivalenum *fival)
1198 {
1199 struct cal_ctx *ctx = video_drvdata(file);
1200 const struct cal_fmt *fmt;
1201 struct v4l2_subdev_frame_interval_enum fie = {
1202 .index = fival->index,
1203 .width = fival->width,
1204 .height = fival->height,
1205 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1206 };
1207 int ret;
1208
1209 fmt = find_format_by_pix(ctx, fival->pixel_format);
1210 if (!fmt)
1211 return -EINVAL;
1212
1213 fie.code = fmt->code;
1214 ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_interval,
1215 NULL, &fie);
1216 if (ret)
1217 return ret;
1218 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1219 fival->discrete = fie.interval;
1220
1221 return 0;
1222 }
1223
1224 /*
1225 * Videobuf operations
1226 */
1227 static int cal_queue_setup(struct vb2_queue *vq,
1228 unsigned int *nbuffers, unsigned int *nplanes,
1229 unsigned int sizes[], void *alloc_ctxs[])
1230 {
1231 struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1232 unsigned size = ctx->v_fmt.fmt.pix.sizeimage;
1233
1234 if (vq->num_buffers + *nbuffers < 3)
1235 *nbuffers = 3 - vq->num_buffers;
1236 alloc_ctxs[0] = ctx->alloc_ctx;
1237
1238 if (*nplanes) {
1239 if (sizes[0] < size)
1240 return -EINVAL;
1241 size = sizes[0];
1242 }
1243
1244 *nplanes = 1;
1245 sizes[0] = size;
1246
1247 ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);
1248
1249 return 0;
1250 }
1251
1252 static int cal_buffer_prepare(struct vb2_buffer *vb)
1253 {
1254 struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1255 struct cal_buffer *buf = container_of(vb, struct cal_buffer,
1256 vb.vb2_buf);
1257 unsigned long size;
1258
1259 if (WARN_ON(!ctx->fmt))
1260 return -EINVAL;
1261
1262 size = ctx->v_fmt.fmt.pix.sizeimage;
1263 if (vb2_plane_size(vb, 0) < size) {
1264 ctx_err(ctx,
1265 "data will not fit into plane (%lu < %lu)\n",
1266 vb2_plane_size(vb, 0), size);
1267 return -EINVAL;
1268 }
1269
1270 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1271 return 0;
1272 }
1273
1274 static void cal_buffer_queue(struct vb2_buffer *vb)
1275 {
1276 struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1277 struct cal_buffer *buf = container_of(vb, struct cal_buffer,
1278 vb.vb2_buf);
1279 struct cal_dmaqueue *vidq = &ctx->vidq;
1280 unsigned long flags = 0;
1281
1282 /* recheck locking */
1283 spin_lock_irqsave(&ctx->slock, flags);
1284 list_add_tail(&buf->list, &vidq->active);
1285 spin_unlock_irqrestore(&ctx->slock, flags);
1286 }
1287
1288 static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
1289 {
1290 struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1291 struct cal_dmaqueue *dma_q = &ctx->vidq;
1292 struct cal_buffer *buf, *tmp;
1293 unsigned long addr = 0;
1294 unsigned long flags;
1295 int ret;
1296
1297 spin_lock_irqsave(&ctx->slock, flags);
1298 if (list_empty(&dma_q->active)) {
1299 spin_unlock_irqrestore(&ctx->slock, flags);
1300 ctx_dbg(3, ctx, "buffer queue is empty\n");
1301 return -EIO;
1302 }
1303
1304 buf = list_entry(dma_q->active.next, struct cal_buffer, list);
1305 ctx->cur_frm = buf;
1306 ctx->next_frm = buf;
1307 list_del(&buf->list);
1308 spin_unlock_irqrestore(&ctx->slock, flags);
1309
1310 addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0);
1311 ctx->sequence = 0;
1312
1313 ret = cal_get_external_info(ctx);
1314 if (ret < 0)
1315 goto err;
1316
1317 cal_runtime_get(ctx->dev);
1318
1319 enable_irqs(ctx);
1320 camerarx_phy_enable(ctx);
1321 csi2_init(ctx);
1322 csi2_phy_config(ctx);
1323 csi2_lane_config(ctx);
1324 csi2_ctx_config(ctx);
1325 pix_proc_config(ctx);
1326 cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
1327 cal_wr_dma_addr(ctx, addr);
1328 csi2_ppi_enable(ctx);
1329
1330 ret = v4l2_subdev_call(ctx->sensor, video, s_stream, 1);
1331 if (ret) {
1332 ctx_err(ctx, "stream on failed in subdev\n");
1333 cal_runtime_put(ctx->dev);
1334 goto err;
1335 }
1336
1337 if (debug >= 4)
1338 cal_quickdump_regs(ctx->dev);
1339
1340 return 0;
1341
1342 err:
1343 list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
1344 list_del(&buf->list);
1345 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
1346 }
1347 return ret;
1348 }
1349
1350 static void cal_stop_streaming(struct vb2_queue *vq)
1351 {
1352 struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1353 struct cal_dmaqueue *dma_q = &ctx->vidq;
1354 struct cal_buffer *buf, *tmp;
1355 unsigned long flags;
1356
1357 if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
1358 ctx_err(ctx, "stream off failed in subdev\n");
1359
1360 csi2_ppi_disable(ctx);
1361 disable_irqs(ctx);
1362
1363 /* Release all active buffers */
1364 spin_lock_irqsave(&ctx->slock, flags);
1365 list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
1366 list_del(&buf->list);
1367 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1368 }
1369
1370 if (ctx->cur_frm == ctx->next_frm) {
1371 vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1372 } else {
1373 vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1374 vb2_buffer_done(&ctx->next_frm->vb.vb2_buf,
1375 VB2_BUF_STATE_ERROR);
1376 }
1377 ctx->cur_frm = NULL;
1378 ctx->next_frm = NULL;
1379 spin_unlock_irqrestore(&ctx->slock, flags);
1380
1381 cal_runtime_put(ctx->dev);
1382 }
1383
1384 static struct vb2_ops cal_video_qops = {
1385 .queue_setup = cal_queue_setup,
1386 .buf_prepare = cal_buffer_prepare,
1387 .buf_queue = cal_buffer_queue,
1388 .start_streaming = cal_start_streaming,
1389 .stop_streaming = cal_stop_streaming,
1390 .wait_prepare = vb2_ops_wait_prepare,
1391 .wait_finish = vb2_ops_wait_finish,
1392 };
1393
1394 static const struct v4l2_file_operations cal_fops = {
1395 .owner = THIS_MODULE,
1396 .open = v4l2_fh_open,
1397 .release = vb2_fop_release,
1398 .read = vb2_fop_read,
1399 .poll = vb2_fop_poll,
1400 .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
1401 .mmap = vb2_fop_mmap,
1402 };
1403
1404 static const struct v4l2_ioctl_ops cal_ioctl_ops = {
1405 .vidioc_querycap = cal_querycap,
1406 .vidioc_enum_fmt_vid_cap = cal_enum_fmt_vid_cap,
1407 .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap,
1408 .vidioc_try_fmt_vid_cap = cal_try_fmt_vid_cap,
1409 .vidioc_s_fmt_vid_cap = cal_s_fmt_vid_cap,
1410 .vidioc_enum_framesizes = cal_enum_framesizes,
1411 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1412 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1413 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1414 .vidioc_querybuf = vb2_ioctl_querybuf,
1415 .vidioc_qbuf = vb2_ioctl_qbuf,
1416 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1417 .vidioc_enum_input = cal_enum_input,
1418 .vidioc_g_input = cal_g_input,
1419 .vidioc_s_input = cal_s_input,
1420 .vidioc_enum_frameintervals = cal_enum_frameintervals,
1421 .vidioc_streamon = vb2_ioctl_streamon,
1422 .vidioc_streamoff = vb2_ioctl_streamoff,
1423 .vidioc_log_status = v4l2_ctrl_log_status,
1424 .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
1425 .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
1426 };
1427
1428 static struct video_device cal_videodev = {
1429 .name = CAL_MODULE_NAME,
1430 .fops = &cal_fops,
1431 .ioctl_ops = &cal_ioctl_ops,
1432 .minor = -1,
1433 .release = video_device_release_empty,
1434 };
1435
1436 /* -----------------------------------------------------------------
1437 * Initialization and module stuff
1438 * ------------------------------------------------------------------
1439 */
1440 static int cal_complete_ctx(struct cal_ctx *ctx);
1441
1442 static int cal_async_bound(struct v4l2_async_notifier *notifier,
1443 struct v4l2_subdev *subdev,
1444 struct v4l2_async_subdev *asd)
1445 {
1446 struct cal_ctx *ctx = notifier_to_ctx(notifier);
1447 struct v4l2_subdev_mbus_code_enum mbus_code;
1448 int ret = 0;
1449 int i, j, k;
1450
1451 if (ctx->sensor) {
   1452 		ctx_info(ctx, "Rejecting subdev %s (Already set!!)\n",
1453 subdev->name);
1454 return 0;
1455 }
1456
1457 ctx->sensor = subdev;
1458 ctx_dbg(1, ctx, "Using sensor %s for capture\n", subdev->name);
1459
1460 /* Enumerate sub device formats and enable all matching local formats */
1461 ctx->num_active_fmt = 0;
1462 for (j = 0, i = 0; ret != -EINVAL; ++j) {
1463 struct cal_fmt *fmt;
1464
1465 memset(&mbus_code, 0, sizeof(mbus_code));
1466 mbus_code.index = j;
1467 ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
1468 NULL, &mbus_code);
1469 if (ret)
1470 continue;
1471
1472 ctx_dbg(2, ctx,
1473 "subdev %s: code: %04x idx: %d\n",
1474 subdev->name, mbus_code.code, j);
1475
1476 for (k = 0; k < ARRAY_SIZE(cal_formats); k++) {
1477 fmt = &cal_formats[k];
1478
1479 if (mbus_code.code == fmt->code) {
1480 ctx->active_fmt[i] = fmt;
1481 ctx_dbg(2, ctx,
1482 "matched fourcc: %s: code: %04x idx: %d\n",
1483 fourcc_to_str(fmt->fourcc),
1484 fmt->code, i);
1485 ctx->num_active_fmt = ++i;
1486 }
1487 }
1488 }
1489
1490 if (i == 0) {
1491 ctx_err(ctx, "No suitable format reported by subdev %s\n",
1492 subdev->name);
1493 return -EINVAL;
1494 }
1495
1496 cal_complete_ctx(ctx);
1497
1498 return 0;
1499 }
1500
1501 static int cal_async_complete(struct v4l2_async_notifier *notifier)
1502 {
1503 struct cal_ctx *ctx = notifier_to_ctx(notifier);
1504 const struct cal_fmt *fmt;
1505 struct v4l2_mbus_framefmt mbus_fmt;
1506 int ret;
1507
1508 ret = __subdev_get_format(ctx, &mbus_fmt);
1509 if (ret)
1510 return ret;
1511
1512 fmt = find_format_by_code(ctx, mbus_fmt.code);
1513 if (!fmt) {
1514 ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
1515 mbus_fmt.code);
1516 return -EINVAL;
1517 }
1518
1519 /* Save current subdev format */
1520 v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
1521 ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1522 ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
1523 cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
1524 ctx->fmt = fmt;
1525 ctx->m_fmt = mbus_fmt;
1526
1527 return 0;
1528 }
1529
1530 static int cal_complete_ctx(struct cal_ctx *ctx)
1531 {
1532 struct video_device *vfd;
1533 struct vb2_queue *q;
1534 int ret;
1535
1536 ctx->timeperframe = tpf_default;
1537 ctx->external_rate = 192000000;
1538
1539 /* initialize locks */
1540 spin_lock_init(&ctx->slock);
1541 mutex_init(&ctx->mutex);
1542
1543 /* initialize queue */
1544 q = &ctx->vb_vidq;
1545 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1546 q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
1547 q->drv_priv = ctx;
1548 q->buf_struct_size = sizeof(struct cal_buffer);
1549 q->ops = &cal_video_qops;
1550 q->mem_ops = &vb2_dma_contig_memops;
1551 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1552 q->lock = &ctx->mutex;
1553 q->min_buffers_needed = 3;
1554
1555 ret = vb2_queue_init(q);
1556 if (ret)
1557 return ret;
1558
1559 /* init video dma queues */
1560 INIT_LIST_HEAD(&ctx->vidq.active);
1561
1562 vfd = &ctx->vdev;
1563 *vfd = cal_videodev;
1564 vfd->v4l2_dev = &ctx->v4l2_dev;
1565 vfd->queue = q;
1566
1567 /*
1568 * Provide a mutex to v4l2 core. It will be used to protect
1569 * all fops and v4l2 ioctls.
1570 */
1571 vfd->lock = &ctx->mutex;
1572 video_set_drvdata(vfd, ctx);
1573
1574 ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
1575 if (ret < 0)
1576 return ret;
1577
1578 v4l2_info(&ctx->v4l2_dev, "V4L2 device registered as %s\n",
1579 video_device_node_name(vfd));
1580
1581 ctx->alloc_ctx = vb2_dma_contig_init_ctx(vfd->v4l2_dev->dev);
1582 if (IS_ERR(ctx->alloc_ctx)) {
1583 ctx_err(ctx, "Failed to alloc vb2 context\n");
1584 ret = PTR_ERR(ctx->alloc_ctx);
1585 goto vdev_unreg;
1586 }
1587
1588 return 0;
1589
1590 vdev_unreg:
1591 video_unregister_device(vfd);
1592 return ret;
1593 }
1594
1595 static struct device_node *
1596 of_get_next_port(const struct device_node *parent,
1597 struct device_node *prev)
1598 {
1599 struct device_node *port = NULL;
1600
1601 if (!parent)
1602 return NULL;
1603
1604 if (!prev) {
1605 struct device_node *ports;
1606 /*
1607 * It's the first call, we have to find a port subnode
1608 * within this node or within an optional 'ports' node.
1609 */
1610 ports = of_get_child_by_name(parent, "ports");
1611 if (ports)
1612 parent = ports;
1613
1614 port = of_get_child_by_name(parent, "port");
1615
1616 /* release the 'ports' node */
1617 of_node_put(ports);
1618 } else {
1619 struct device_node *ports;
1620
1621 ports = of_get_parent(prev);
1622 if (!ports)
1623 return NULL;
1624
1625 do {
1626 port = of_get_next_child(ports, prev);
1627 if (!port) {
1628 of_node_put(ports);
1629 return NULL;
1630 }
1631 prev = port;
1632 } while (of_node_cmp(port->name, "port") != 0);
1633 }
1634
1635 return port;
1636 }
1637
1638 static struct device_node *
1639 of_get_next_endpoint(const struct device_node *parent,
1640 struct device_node *prev)
1641 {
1642 struct device_node *ep = NULL;
1643
1644 if (!parent)
1645 return NULL;
1646
1647 do {
1648 ep = of_get_next_child(parent, prev);
1649 if (!ep)
1650 return NULL;
1651 prev = ep;
1652 } while (of_node_cmp(ep->name, "endpoint") != 0);
1653
1654 return ep;
1655 }
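/*
 * of_cal_create_instance() below walks the standard video-interfaces
 * graph.  A minimal sketch of the expected layout (node names, unit
 * addresses and lane numbers are examples only, not taken from a real
 * board file):
 *
 *	cal: cal@0 {
 *		compatible = "ti,dra72-cal";
 *		...
 *		ports {
 *			port@0 {
 *				reg = <0>;
 *				csi2_0: endpoint {
 *					remote-endpoint = <&camera_ep>;
 *					data-lanes = <1 2>;
 *				};
 *			};
 *		};
 *	};
 */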
1656
1657 static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
1658 {
1659 struct platform_device *pdev = ctx->dev->pdev;
1660 struct device_node *ep_node, *port, *remote_ep,
1661 *sensor_node, *parent;
1662 struct v4l2_of_endpoint *endpoint;
1663 struct v4l2_async_subdev *asd;
1664 u32 regval = 0;
1665 int ret, index, found_port = 0, lane;
1666
1667 parent = pdev->dev.of_node;
1668
1669 asd = &ctx->asd;
1670 endpoint = &ctx->endpoint;
1671
1672 ep_node = NULL;
1673 port = NULL;
1674 remote_ep = NULL;
1675 sensor_node = NULL;
1676 ret = -EINVAL;
1677
1678 ctx_dbg(3, ctx, "Scanning Port node for csi2 port: %d\n", inst);
1679 for (index = 0; index < CAL_NUM_CSI2_PORTS; index++) {
1680 port = of_get_next_port(parent, port);
1681 if (!port) {
1682 ctx_dbg(1, ctx, "No port node found for csi2 port:%d\n",
1683 index);
1684 goto cleanup_exit;
1685 }
1686
   1687 		/* Match the csi2 port number with the port node's 'reg' property */
1688 of_property_read_u32(port, "reg", &regval);
1689 ctx_dbg(3, ctx, "port:%d inst:%d <reg>:%d\n",
1690 index, inst, regval);
1691 if ((regval == inst) && (index == inst)) {
1692 found_port = 1;
1693 break;
1694 }
1695 }
1696
1697 if (!found_port) {
1698 ctx_dbg(1, ctx, "No port node matches csi2 port:%d\n",
1699 inst);
1700 goto cleanup_exit;
1701 }
1702
1703 ctx_dbg(3, ctx, "Scanning sub-device for csi2 port: %d\n",
1704 inst);
1705
1706 ep_node = of_get_next_endpoint(port, ep_node);
1707 if (!ep_node) {
1708 ctx_dbg(3, ctx, "can't get next endpoint\n");
1709 goto cleanup_exit;
1710 }
1711
1712 sensor_node = of_graph_get_remote_port_parent(ep_node);
1713 if (!sensor_node) {
1714 ctx_dbg(3, ctx, "can't get remote parent\n");
1715 goto cleanup_exit;
1716 }
1717 asd->match_type = V4L2_ASYNC_MATCH_OF;
1718 asd->match.of.node = sensor_node;
1719
1720 remote_ep = of_parse_phandle(ep_node, "remote-endpoint", 0);
1721 if (!remote_ep) {
1722 ctx_dbg(3, ctx, "can't get remote-endpoint\n");
1723 goto cleanup_exit;
1724 }
1725 v4l2_of_parse_endpoint(remote_ep, endpoint);
1726
1727 if (endpoint->bus_type != V4L2_MBUS_CSI2) {
1728 ctx_err(ctx, "Port:%d sub-device %s is not a CSI2 device\n",
1729 inst, sensor_node->name);
1730 goto cleanup_exit;
1731 }
1732
1733 /* Store Virtual Channel number */
1734 ctx->virtual_channel = endpoint->base.id;
1735
1736 ctx_dbg(3, ctx, "Port:%d v4l2-endpoint: CSI2\n", inst);
1737 ctx_dbg(3, ctx, "Virtual Channel=%d\n", ctx->virtual_channel);
1738 ctx_dbg(3, ctx, "flags=0x%08x\n", endpoint->bus.mipi_csi2.flags);
1739 ctx_dbg(3, ctx, "clock_lane=%d\n", endpoint->bus.mipi_csi2.clock_lane);
1740 ctx_dbg(3, ctx, "num_data_lanes=%d\n",
1741 endpoint->bus.mipi_csi2.num_data_lanes);
1742 ctx_dbg(3, ctx, "data_lanes= <\n");
1743 for (lane = 0; lane < endpoint->bus.mipi_csi2.num_data_lanes; lane++)
1744 ctx_dbg(3, ctx, "\t%d\n",
1745 endpoint->bus.mipi_csi2.data_lanes[lane]);
1746 ctx_dbg(3, ctx, "\t>\n");
1747
1748 ctx_dbg(1, ctx, "Port: %d found sub-device %s\n",
1749 inst, sensor_node->name);
1750
1751 ctx->asd_list[0] = asd;
1752 ctx->notifier.subdevs = ctx->asd_list;
1753 ctx->notifier.num_subdevs = 1;
1754 ctx->notifier.bound = cal_async_bound;
1755 ctx->notifier.complete = cal_async_complete;
1756 ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
1757 &ctx->notifier);
1758 if (ret) {
1759 ctx_err(ctx, "Error registering async notifier\n");
1760 ret = -EINVAL;
1761 }
1762
1763 cleanup_exit:
   1764 	if (remote_ep)
   1765 		of_node_put(remote_ep);
   1766 	if (sensor_node)
   1767 		of_node_put(sensor_node);
   1768 	if (ep_node)
   1769 		of_node_put(ep_node);
   1770 	if (port)
   1771 		of_node_put(port);
1772
1773 return ret;
1774 }
1775
1776 static struct cal_ctx *cal_create_instance(struct cal_dev *dev, int inst)
1777 {
1778 struct cal_ctx *ctx;
1779 struct v4l2_ctrl_handler *hdl;
1780 int ret;
1781
1782 ctx = devm_kzalloc(&dev->pdev->dev, sizeof(*ctx), GFP_KERNEL);
1783 if (!ctx)
1784 return NULL;
1785
   1786 	/* save the cal_dev pointer for future reference */
1787 ctx->dev = dev;
1788
1789 snprintf(ctx->v4l2_dev.name, sizeof(ctx->v4l2_dev.name),
1790 "%s-%03d", CAL_MODULE_NAME, inst);
1791 ret = v4l2_device_register(&dev->pdev->dev, &ctx->v4l2_dev);
1792 if (ret)
1793 goto err_exit;
1794
1795 hdl = &ctx->ctrl_handler;
1796 ret = v4l2_ctrl_handler_init(hdl, 11);
1797 if (ret) {
1798 ctx_err(ctx, "Failed to init ctrl handler\n");
1799 goto unreg_dev;
1800 }
1801 ctx->v4l2_dev.ctrl_handler = hdl;
1802
1803 /* Make sure Camera Core H/W register area is available */
1804 ctx->cc = dev->cc[inst];
1805
1806 /* Store the instance id */
1807 ctx->csi2_port = inst + 1;
1808
1809 ret = of_cal_create_instance(ctx, inst);
1810 if (ret) {
1811 ret = -EINVAL;
1812 goto free_hdl;
1813 }
1814 return ctx;
1815
1816 free_hdl:
1817 v4l2_ctrl_handler_free(hdl);
1818 unreg_dev:
1819 v4l2_device_unregister(&ctx->v4l2_dev);
1820 err_exit:
1821 return NULL;
1822 }
1823
1824 static int cal_probe(struct platform_device *pdev)
1825 {
1826 struct cal_dev *dev;
1827 int ret;
1828 int irq;
1829
1830 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1831 if (!dev)
1832 return -ENOMEM;
1833
1834 /* set pseudo v4l2 device name so we can use v4l2_printk */
1835 strlcpy(dev->v4l2_dev.name, CAL_MODULE_NAME,
1836 sizeof(dev->v4l2_dev.name));
1837
1838 /* save pdev pointer */
1839 dev->pdev = pdev;
1840
1841 dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1842 "cal_top");
1843 dev->base = devm_ioremap_resource(&pdev->dev, dev->res);
1844 if (IS_ERR(dev->base))
1845 return PTR_ERR(dev->base);
1846
1847 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
1848 dev->res->name, &dev->res->start, &dev->res->end);
1849
1850 irq = platform_get_irq(pdev, 0);
1851 cal_dbg(1, dev, "got irq# %d\n", irq);
1852 ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME,
1853 dev);
1854 if (ret)
1855 return ret;
1856
1857 platform_set_drvdata(pdev, dev);
1858
1859 dev->cm = cm_create(dev);
1860 if (IS_ERR(dev->cm))
1861 return PTR_ERR(dev->cm);
1862
1863 dev->cc[0] = cc_create(dev, 0);
1864 if (IS_ERR(dev->cc[0]))
1865 return PTR_ERR(dev->cc[0]);
1866
1867 dev->cc[1] = cc_create(dev, 1);
1868 if (IS_ERR(dev->cc[1]))
1869 return PTR_ERR(dev->cc[1]);
1870
1871 dev->ctx[0] = NULL;
1872 dev->ctx[1] = NULL;
1873
1874 dev->ctx[0] = cal_create_instance(dev, 0);
1875 dev->ctx[1] = cal_create_instance(dev, 1);
1876 if (!dev->ctx[0] && !dev->ctx[1]) {
1877 cal_err(dev, "Neither port is configured, no point in staying up\n");
1878 return -ENODEV;
1879 }
1880
1881 pm_runtime_enable(&pdev->dev);
1882
1883 ret = cal_runtime_get(dev);
   1884 	if (ret < 0)
1885 goto runtime_disable;
1886
1887 /* Just check we can actually access the module */
1888 cal_get_hwinfo(dev);
1889
1890 cal_runtime_put(dev);
1891
1892 return 0;
1893
1894 runtime_disable:
1895 pm_runtime_disable(&pdev->dev);
1896 return ret;
1897 }
1898
1899 static int cal_remove(struct platform_device *pdev)
1900 {
1901 struct cal_dev *dev =
1902 (struct cal_dev *)platform_get_drvdata(pdev);
1903 struct cal_ctx *ctx;
1904 int i;
1905
1906 cal_dbg(1, dev, "Removing %s\n", CAL_MODULE_NAME);
1907
1908 cal_runtime_get(dev);
1909
1910 for (i = 0; i < CAL_NUM_CONTEXT; i++) {
1911 ctx = dev->ctx[i];
1912 if (ctx) {
1913 ctx_dbg(1, ctx, "unregistering %s\n",
1914 video_device_node_name(&ctx->vdev));
1915 camerarx_phy_disable(ctx);
1916 v4l2_async_notifier_unregister(&ctx->notifier);
1917 vb2_dma_contig_cleanup_ctx(ctx->alloc_ctx);
1918 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
1919 v4l2_device_unregister(&ctx->v4l2_dev);
1920 video_unregister_device(&ctx->vdev);
1921 }
1922 }
1923
1924 cal_runtime_put(dev);
1925 pm_runtime_disable(&pdev->dev);
1926
1927 return 0;
1928 }
1929
1930 #if defined(CONFIG_OF)
1931 static const struct of_device_id cal_of_match[] = {
1932 { .compatible = "ti,dra72-cal", },
1933 {},
1934 };
1935 MODULE_DEVICE_TABLE(of, cal_of_match);
1936 #endif
1937
1938 static struct platform_driver cal_pdrv = {
1939 .probe = cal_probe,
1940 .remove = cal_remove,
1941 .driver = {
1942 .name = CAL_MODULE_NAME,
1943 .of_match_table = of_match_ptr(cal_of_match),
1944 },
1945 };
1946
1947 module_platform_driver(cal_pdrv);