2 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
3 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 #include <linux/module.h>
16 #include <linux/export.h>
17 #include <linux/types.h>
18 #include <linux/init.h>
19 #include <linux/platform_device.h>
20 #include <linux/err.h>
21 #include <linux/spinlock.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
25 #include <linux/clk.h>
26 #include <linux/list.h>
27 #include <linux/irq.h>
28 #include <linux/of_device.h>
29 #include <asm/mach/irq.h>
31 #include "imx-ipu-v3.h"
34 static inline u32
ipu_cm_read(struct ipu_soc
*ipu
, unsigned offset
)
36 return readl(ipu
->cm_reg
+ offset
);
39 static inline void ipu_cm_write(struct ipu_soc
*ipu
, u32 value
, unsigned offset
)
41 writel(value
, ipu
->cm_reg
+ offset
);
44 static inline u32
ipu_idmac_read(struct ipu_soc
*ipu
, unsigned offset
)
46 return readl(ipu
->idmac_reg
+ offset
);
49 static inline void ipu_idmac_write(struct ipu_soc
*ipu
, u32 value
,
52 writel(value
, ipu
->idmac_reg
+ offset
);
55 void ipu_srm_dp_sync_update(struct ipu_soc
*ipu
)
59 val
= ipu_cm_read(ipu
, IPU_SRM_PRI2
);
61 ipu_cm_write(ipu
, val
, IPU_SRM_PRI2
);
63 EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update
);
65 struct ipu_ch_param __iomem
*ipu_get_cpmem(struct ipuv3_channel
*channel
)
67 struct ipu_soc
*ipu
= channel
->ipu
;
69 return ipu
->cpmem_base
+ channel
->num
;
71 EXPORT_SYMBOL_GPL(ipu_get_cpmem
);
73 void ipu_cpmem_set_high_priority(struct ipuv3_channel
*channel
)
75 struct ipu_soc
*ipu
= channel
->ipu
;
76 struct ipu_ch_param __iomem
*p
= ipu_get_cpmem(channel
);
79 if (ipu
->ipu_type
== IPUV3EX
)
80 ipu_ch_param_write_field(p
, IPU_FIELD_ID
, 1);
82 val
= ipu_idmac_read(ipu
, IDMAC_CHA_PRI(channel
->num
));
83 val
|= 1 << (channel
->num
% 32);
84 ipu_idmac_write(ipu
, val
, IDMAC_CHA_PRI(channel
->num
));
86 EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority
);
88 void ipu_ch_param_write_field(struct ipu_ch_param __iomem
*base
, u32 wbs
, u32 v
)
90 u32 bit
= (wbs
>> 8) % 160;
91 u32 size
= wbs
& 0xff;
92 u32 word
= (wbs
>> 8) / 160;
95 u32 mask
= (1 << size
) - 1;
98 pr_debug("%s %d %d %d\n", __func__
, word
, bit
, size
);
100 val
= readl(&base
->word
[word
].data
[i
]);
101 val
&= ~(mask
<< ofs
);
103 writel(val
, &base
->word
[word
].data
[i
]);
105 if ((bit
+ size
- 1) / 32 > i
) {
106 val
= readl(&base
->word
[word
].data
[i
+ 1]);
107 val
&= ~(mask
>> (ofs
? (32 - ofs
) : 0));
108 val
|= v
>> (ofs
? (32 - ofs
) : 0);
109 writel(val
, &base
->word
[word
].data
[i
+ 1]);
112 EXPORT_SYMBOL_GPL(ipu_ch_param_write_field
);
114 u32
ipu_ch_param_read_field(struct ipu_ch_param __iomem
*base
, u32 wbs
)
116 u32 bit
= (wbs
>> 8) % 160;
117 u32 size
= wbs
& 0xff;
118 u32 word
= (wbs
>> 8) / 160;
121 u32 mask
= (1 << size
) - 1;
124 pr_debug("%s %d %d %d\n", __func__
, word
, bit
, size
);
126 val
= (readl(&base
->word
[word
].data
[i
]) >> ofs
) & mask
;
128 if ((bit
+ size
- 1) / 32 > i
) {
130 tmp
= readl(&base
->word
[word
].data
[i
+ 1]);
131 tmp
&= mask
>> (ofs
? (32 - ofs
) : 0);
132 val
|= tmp
<< (ofs
? (32 - ofs
) : 0);
137 EXPORT_SYMBOL_GPL(ipu_ch_param_read_field
);
139 int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem
*p
,
142 int bpp
= 0, npb
= 0, ro
, go
, bo
, to
;
144 ro
= rgb
->bits_per_pixel
- rgb
->red
.length
- rgb
->red
.offset
;
145 go
= rgb
->bits_per_pixel
- rgb
->green
.length
- rgb
->green
.offset
;
146 bo
= rgb
->bits_per_pixel
- rgb
->blue
.length
- rgb
->blue
.offset
;
147 to
= rgb
->bits_per_pixel
- rgb
->transp
.length
- rgb
->transp
.offset
;
149 ipu_ch_param_write_field(p
, IPU_FIELD_WID0
, rgb
->red
.length
- 1);
150 ipu_ch_param_write_field(p
, IPU_FIELD_OFS0
, ro
);
151 ipu_ch_param_write_field(p
, IPU_FIELD_WID1
, rgb
->green
.length
- 1);
152 ipu_ch_param_write_field(p
, IPU_FIELD_OFS1
, go
);
153 ipu_ch_param_write_field(p
, IPU_FIELD_WID2
, rgb
->blue
.length
- 1);
154 ipu_ch_param_write_field(p
, IPU_FIELD_OFS2
, bo
);
156 if (rgb
->transp
.length
) {
157 ipu_ch_param_write_field(p
, IPU_FIELD_WID3
,
158 rgb
->transp
.length
- 1);
159 ipu_ch_param_write_field(p
, IPU_FIELD_OFS3
, to
);
161 ipu_ch_param_write_field(p
, IPU_FIELD_WID3
, 7);
162 ipu_ch_param_write_field(p
, IPU_FIELD_OFS3
,
163 rgb
->bits_per_pixel
);
166 switch (rgb
->bits_per_pixel
) {
186 ipu_ch_param_write_field(p
, IPU_FIELD_BPP
, bpp
);
187 ipu_ch_param_write_field(p
, IPU_FIELD_NPB
, npb
);
188 ipu_ch_param_write_field(p
, IPU_FIELD_PFS
, 7); /* rgb mode */
192 EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb
);
194 int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem
*p
,
197 int bpp
= 0, npb
= 0;
220 ipu_ch_param_write_field(p
, IPU_FIELD_BPP
, bpp
);
221 ipu_ch_param_write_field(p
, IPU_FIELD_NPB
, npb
);
222 ipu_ch_param_write_field(p
, IPU_FIELD_PFS
, 6); /* raw mode */
226 EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough
);
228 void ipu_cpmem_set_yuv_interleaved(struct ipu_ch_param
*p
, u32 pixel_format
)
230 switch (pixel_format
) {
231 case V4L2_PIX_FMT_UYVY
:
232 ipu_ch_param_write_field(p
, IPU_FIELD_BPP
, 3); /* bits/pixel */
233 ipu_ch_param_write_field(p
, IPU_FIELD_PFS
, 0xA); /* pix format */
234 ipu_ch_param_write_field(p
, IPU_FIELD_NPB
, 31); /* burst size */
236 case V4L2_PIX_FMT_YUYV
:
237 ipu_ch_param_write_field(p
, IPU_FIELD_BPP
, 3); /* bits/pixel */
238 ipu_ch_param_write_field(p
, IPU_FIELD_PFS
, 0x8); /* pix format */
239 ipu_ch_param_write_field(p
, IPU_FIELD_NPB
, 31); /* burst size */
243 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved
);
245 void ipu_cpmem_set_yuv_planar_full(struct ipu_ch_param __iomem
*p
,
246 u32 pixel_format
, int stride
, int u_offset
, int v_offset
)
248 switch (pixel_format
) {
249 case V4L2_PIX_FMT_YUV420
:
250 ipu_ch_param_write_field(p
, IPU_FIELD_SLUV
, (stride
/ 2) - 1);
251 ipu_ch_param_write_field(p
, IPU_FIELD_UBO
, u_offset
/ 8);
252 ipu_ch_param_write_field(p
, IPU_FIELD_VBO
, v_offset
/ 8);
254 case V4L2_PIX_FMT_YVU420
:
255 ipu_ch_param_write_field(p
, IPU_FIELD_SLUV
, (stride
/ 2) - 1);
256 ipu_ch_param_write_field(p
, IPU_FIELD_UBO
, v_offset
/ 8);
257 ipu_ch_param_write_field(p
, IPU_FIELD_VBO
, u_offset
/ 8);
261 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full
);
263 void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem
*p
, u32 pixel_format
,
264 int stride
, int height
)
266 int u_offset
, v_offset
;
269 switch (pixel_format
) {
270 case V4L2_PIX_FMT_YUV420
:
271 case V4L2_PIX_FMT_YVU420
:
272 uv_stride
= stride
/ 2;
273 u_offset
= stride
* height
;
274 v_offset
= u_offset
+ (uv_stride
* height
/ 2);
275 ipu_cpmem_set_yuv_planar_full(p
, pixel_format
, stride
,
280 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar
);
282 static struct ipu_rgb def_rgb_32
= {
283 .red
= { .offset
= 16, .length
= 8, },
284 .green
= { .offset
= 8, .length
= 8, },
285 .blue
= { .offset
= 0, .length
= 8, },
286 .transp
= { .offset
= 24, .length
= 8, },
287 .bits_per_pixel
= 32,
290 static struct ipu_rgb def_bgr_32
= {
291 .red
= { .offset
= 16, .length
= 8, },
292 .green
= { .offset
= 8, .length
= 8, },
293 .blue
= { .offset
= 0, .length
= 8, },
294 .transp
= { .offset
= 24, .length
= 8, },
295 .bits_per_pixel
= 32,
298 static struct ipu_rgb def_rgb_24
= {
299 .red
= { .offset
= 0, .length
= 8, },
300 .green
= { .offset
= 8, .length
= 8, },
301 .blue
= { .offset
= 16, .length
= 8, },
302 .transp
= { .offset
= 0, .length
= 0, },
303 .bits_per_pixel
= 24,
306 static struct ipu_rgb def_bgr_24
= {
307 .red
= { .offset
= 16, .length
= 8, },
308 .green
= { .offset
= 8, .length
= 8, },
309 .blue
= { .offset
= 0, .length
= 8, },
310 .transp
= { .offset
= 0, .length
= 0, },
311 .bits_per_pixel
= 24,
314 static struct ipu_rgb def_rgb_16
= {
315 .red
= { .offset
= 11, .length
= 5, },
316 .green
= { .offset
= 5, .length
= 6, },
317 .blue
= { .offset
= 0, .length
= 5, },
318 .transp
= { .offset
= 0, .length
= 0, },
319 .bits_per_pixel
= 16,
/*
 * Byte offsets of a pixel (x, y) within a contiguous planar YUV 4:2:0
 * buffer: full-resolution luma plane first, then quarter-size U and V planes.
 */
#define Y_OFFSET(pix, x, y)	((x) + pix->width * (y))
#define U_OFFSET(pix, x, y)	((pix->width * pix->height) + \
				 (pix->width * (y) / 4) + (x) / 2)
#define V_OFFSET(pix, x, y)	((pix->width * pix->height) + \
				 (pix->width * pix->height / 4) + \
				 (pix->width * (y) / 4) + (x) / 2)
329 int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem
*cpmem
, u32 pixelformat
)
331 switch (pixelformat
) {
332 case V4L2_PIX_FMT_YUV420
:
333 case V4L2_PIX_FMT_YVU420
:
335 ipu_ch_param_write_field(cpmem
, IPU_FIELD_PFS
, 2);
337 ipu_ch_param_write_field(cpmem
, IPU_FIELD_NPB
, 63);
339 case V4L2_PIX_FMT_UYVY
:
341 ipu_ch_param_write_field(cpmem
, IPU_FIELD_BPP
, 3);
343 ipu_ch_param_write_field(cpmem
, IPU_FIELD_PFS
, 0xA);
345 ipu_ch_param_write_field(cpmem
, IPU_FIELD_NPB
, 31);
347 case V4L2_PIX_FMT_YUYV
:
349 ipu_ch_param_write_field(cpmem
, IPU_FIELD_BPP
, 3);
351 ipu_ch_param_write_field(cpmem
, IPU_FIELD_PFS
, 0x8);
353 ipu_ch_param_write_field(cpmem
, IPU_FIELD_NPB
, 31);
355 case V4L2_PIX_FMT_RGB32
:
356 ipu_cpmem_set_format_rgb(cpmem
, &def_rgb_32
);
358 case V4L2_PIX_FMT_RGB565
:
359 ipu_cpmem_set_format_rgb(cpmem
, &def_rgb_16
);
361 case V4L2_PIX_FMT_BGR32
:
362 ipu_cpmem_set_format_rgb(cpmem
, &def_bgr_32
);
364 case V4L2_PIX_FMT_RGB24
:
365 ipu_cpmem_set_format_rgb(cpmem
, &def_rgb_24
);
367 case V4L2_PIX_FMT_BGR24
:
368 ipu_cpmem_set_format_rgb(cpmem
, &def_bgr_24
);
376 EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt
);
378 int ipu_cpmem_set_image(struct ipu_ch_param __iomem
*cpmem
,
379 struct ipu_image
*image
)
381 struct v4l2_pix_format
*pix
= &image
->pix
;
382 int y_offset
, u_offset
, v_offset
;
384 pr_debug("%s: resolution: %dx%d stride: %d\n",
385 __func__
, pix
->width
, pix
->height
,
388 ipu_cpmem_set_resolution(cpmem
, image
->rect
.width
,
390 ipu_cpmem_set_stride(cpmem
, pix
->bytesperline
);
392 ipu_cpmem_set_fmt(cpmem
, pix
->pixelformat
);
394 switch (pix
->pixelformat
) {
395 case V4L2_PIX_FMT_YUV420
:
396 case V4L2_PIX_FMT_YVU420
:
397 y_offset
= Y_OFFSET(pix
, image
->rect
.left
, image
->rect
.top
);
398 u_offset
= U_OFFSET(pix
, image
->rect
.left
,
399 image
->rect
.top
) - y_offset
;
400 v_offset
= V_OFFSET(pix
, image
->rect
.left
,
401 image
->rect
.top
) - y_offset
;
403 ipu_cpmem_set_yuv_planar_full(cpmem
, pix
->pixelformat
,
404 pix
->bytesperline
, u_offset
, v_offset
);
405 ipu_cpmem_set_buffer(cpmem
, 0, image
->phys
+ y_offset
);
407 case V4L2_PIX_FMT_UYVY
:
408 case V4L2_PIX_FMT_YUYV
:
409 ipu_cpmem_set_buffer(cpmem
, 0, image
->phys
+
410 image
->rect
.left
* 2 +
411 image
->rect
.top
* image
->pix
.bytesperline
);
413 case V4L2_PIX_FMT_RGB32
:
414 case V4L2_PIX_FMT_BGR32
:
415 ipu_cpmem_set_buffer(cpmem
, 0, image
->phys
+
416 image
->rect
.left
* 4 +
417 image
->rect
.top
* image
->pix
.bytesperline
);
419 case V4L2_PIX_FMT_RGB565
:
420 ipu_cpmem_set_buffer(cpmem
, 0, image
->phys
+
421 image
->rect
.left
* 2 +
422 image
->rect
.top
* image
->pix
.bytesperline
);
424 case V4L2_PIX_FMT_RGB24
:
425 case V4L2_PIX_FMT_BGR24
:
426 ipu_cpmem_set_buffer(cpmem
, 0, image
->phys
+
427 image
->rect
.left
* 3 +
428 image
->rect
.top
* image
->pix
.bytesperline
);
436 EXPORT_SYMBOL_GPL(ipu_cpmem_set_image
);
438 enum ipu_color_space
ipu_pixelformat_to_colorspace(u32 pixelformat
)
440 switch (pixelformat
) {
441 case V4L2_PIX_FMT_YUV420
:
442 case V4L2_PIX_FMT_YVU420
:
443 case V4L2_PIX_FMT_UYVY
:
444 case V4L2_PIX_FMT_YUYV
:
445 return IPUV3_COLORSPACE_YUV
;
446 case V4L2_PIX_FMT_RGB32
:
447 case V4L2_PIX_FMT_BGR32
:
448 case V4L2_PIX_FMT_RGB24
:
449 case V4L2_PIX_FMT_BGR24
:
450 case V4L2_PIX_FMT_RGB565
:
451 return IPUV3_COLORSPACE_RGB
;
453 return IPUV3_COLORSPACE_UNKNOWN
;
456 EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace
);
458 struct ipuv3_channel
*ipu_idmac_get(struct ipu_soc
*ipu
, unsigned num
)
460 struct ipuv3_channel
*channel
;
462 dev_dbg(ipu
->dev
, "%s %d\n", __func__
, num
);
465 return ERR_PTR(-ENODEV
);
467 mutex_lock(&ipu
->channel_lock
);
469 channel
= &ipu
->channel
[num
];
472 channel
= ERR_PTR(-EBUSY
);
480 mutex_unlock(&ipu
->channel_lock
);
484 EXPORT_SYMBOL_GPL(ipu_idmac_get
);
486 void ipu_idmac_put(struct ipuv3_channel
*channel
)
488 struct ipu_soc
*ipu
= channel
->ipu
;
490 dev_dbg(ipu
->dev
, "%s %d\n", __func__
, channel
->num
);
492 mutex_lock(&ipu
->channel_lock
);
496 mutex_unlock(&ipu
->channel_lock
);
498 EXPORT_SYMBOL_GPL(ipu_idmac_put
);
/*
 * Bit mask for a channel within a 32-channel register bank.
 * The macro argument is parenthesized so expressions with operators of
 * lower precedence than '&' (e.g. idma_mask(a | b)) expand correctly.
 */
#define idma_mask(ch)	(1 << ((ch) & 0x1f))
502 void ipu_idmac_set_double_buffer(struct ipuv3_channel
*channel
,
505 struct ipu_soc
*ipu
= channel
->ipu
;
509 spin_lock_irqsave(&ipu
->lock
, flags
);
511 reg
= ipu_cm_read(ipu
, IPU_CHA_DB_MODE_SEL(channel
->num
));
513 reg
|= idma_mask(channel
->num
);
515 reg
&= ~idma_mask(channel
->num
);
516 ipu_cm_write(ipu
, reg
, IPU_CHA_DB_MODE_SEL(channel
->num
));
518 spin_unlock_irqrestore(&ipu
->lock
, flags
);
520 EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer
);
522 int ipu_module_enable(struct ipu_soc
*ipu
, u32 mask
)
524 unsigned long lock_flags
;
527 spin_lock_irqsave(&ipu
->lock
, lock_flags
);
529 val
= ipu_cm_read(ipu
, IPU_DISP_GEN
);
531 if (mask
& IPU_CONF_DI0_EN
)
532 val
|= IPU_DI0_COUNTER_RELEASE
;
533 if (mask
& IPU_CONF_DI1_EN
)
534 val
|= IPU_DI1_COUNTER_RELEASE
;
536 ipu_cm_write(ipu
, val
, IPU_DISP_GEN
);
538 val
= ipu_cm_read(ipu
, IPU_CONF
);
540 ipu_cm_write(ipu
, val
, IPU_CONF
);
542 spin_unlock_irqrestore(&ipu
->lock
, lock_flags
);
546 EXPORT_SYMBOL_GPL(ipu_module_enable
);
548 int ipu_module_disable(struct ipu_soc
*ipu
, u32 mask
)
550 unsigned long lock_flags
;
553 spin_lock_irqsave(&ipu
->lock
, lock_flags
);
555 val
= ipu_cm_read(ipu
, IPU_CONF
);
557 ipu_cm_write(ipu
, val
, IPU_CONF
);
559 val
= ipu_cm_read(ipu
, IPU_DISP_GEN
);
561 if (mask
& IPU_CONF_DI0_EN
)
562 val
&= ~IPU_DI0_COUNTER_RELEASE
;
563 if (mask
& IPU_CONF_DI1_EN
)
564 val
&= ~IPU_DI1_COUNTER_RELEASE
;
566 ipu_cm_write(ipu
, val
, IPU_DISP_GEN
);
568 spin_unlock_irqrestore(&ipu
->lock
, lock_flags
);
572 EXPORT_SYMBOL_GPL(ipu_module_disable
);
574 void ipu_idmac_select_buffer(struct ipuv3_channel
*channel
, u32 buf_num
)
576 struct ipu_soc
*ipu
= channel
->ipu
;
577 unsigned int chno
= channel
->num
;
580 spin_lock_irqsave(&ipu
->lock
, flags
);
582 /* Mark buffer as ready. */
584 ipu_cm_write(ipu
, idma_mask(chno
), IPU_CHA_BUF0_RDY(chno
));
586 ipu_cm_write(ipu
, idma_mask(chno
), IPU_CHA_BUF1_RDY(chno
));
588 spin_unlock_irqrestore(&ipu
->lock
, flags
);
590 EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer
);
592 int ipu_idmac_enable_channel(struct ipuv3_channel
*channel
)
594 struct ipu_soc
*ipu
= channel
->ipu
;
598 spin_lock_irqsave(&ipu
->lock
, flags
);
600 val
= ipu_idmac_read(ipu
, IDMAC_CHA_EN(channel
->num
));
601 val
|= idma_mask(channel
->num
);
602 ipu_idmac_write(ipu
, val
, IDMAC_CHA_EN(channel
->num
));
604 spin_unlock_irqrestore(&ipu
->lock
, flags
);
608 EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel
);
610 int ipu_idmac_disable_channel(struct ipuv3_channel
*channel
)
612 struct ipu_soc
*ipu
= channel
->ipu
;
615 unsigned long timeout
;
617 timeout
= jiffies
+ msecs_to_jiffies(50);
618 while (ipu_idmac_read(ipu
, IDMAC_CHA_BUSY(channel
->num
)) &
619 idma_mask(channel
->num
)) {
620 if (time_after(jiffies
, timeout
)) {
621 dev_warn(ipu
->dev
, "disabling busy idmac channel %d\n",
628 spin_lock_irqsave(&ipu
->lock
, flags
);
630 /* Disable DMA channel(s) */
631 val
= ipu_idmac_read(ipu
, IDMAC_CHA_EN(channel
->num
));
632 val
&= ~idma_mask(channel
->num
);
633 ipu_idmac_write(ipu
, val
, IDMAC_CHA_EN(channel
->num
));
635 /* Set channel buffers NOT to be ready */
636 ipu_cm_write(ipu
, 0xf0000000, IPU_GPR
); /* write one to clear */
638 if (ipu_cm_read(ipu
, IPU_CHA_BUF0_RDY(channel
->num
)) &
639 idma_mask(channel
->num
)) {
640 ipu_cm_write(ipu
, idma_mask(channel
->num
),
641 IPU_CHA_BUF0_RDY(channel
->num
));
644 if (ipu_cm_read(ipu
, IPU_CHA_BUF1_RDY(channel
->num
)) &
645 idma_mask(channel
->num
)) {
646 ipu_cm_write(ipu
, idma_mask(channel
->num
),
647 IPU_CHA_BUF1_RDY(channel
->num
));
650 ipu_cm_write(ipu
, 0x0, IPU_GPR
); /* write one to set */
652 /* Reset the double buffer */
653 val
= ipu_cm_read(ipu
, IPU_CHA_DB_MODE_SEL(channel
->num
));
654 val
&= ~idma_mask(channel
->num
);
655 ipu_cm_write(ipu
, val
, IPU_CHA_DB_MODE_SEL(channel
->num
));
657 spin_unlock_irqrestore(&ipu
->lock
, flags
);
661 EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel
);
663 static int ipu_reset(struct ipu_soc
*ipu
)
665 unsigned long timeout
;
667 ipu_cm_write(ipu
, 0x807FFFFF, IPU_MEM_RST
);
669 timeout
= jiffies
+ msecs_to_jiffies(1000);
670 while (ipu_cm_read(ipu
, IPU_MEM_RST
) & 0x80000000) {
671 if (time_after(jiffies
, timeout
))
681 unsigned long cm_ofs
;
682 unsigned long cpmem_ofs
;
683 unsigned long srm_ofs
;
684 unsigned long tpm_ofs
;
685 unsigned long disp0_ofs
;
686 unsigned long disp1_ofs
;
687 unsigned long dc_tmpl_ofs
;
688 unsigned long vdi_ofs
;
689 enum ipuv3_type type
;
692 static struct ipu_devtype ipu_type_imx51
= {
694 .cm_ofs
= 0x1e000000,
695 .cpmem_ofs
= 0x1f000000,
696 .srm_ofs
= 0x1f040000,
697 .tpm_ofs
= 0x1f060000,
698 .disp0_ofs
= 0x1e040000,
699 .disp1_ofs
= 0x1e048000,
700 .dc_tmpl_ofs
= 0x1f080000,
701 .vdi_ofs
= 0x1e068000,
705 static struct ipu_devtype ipu_type_imx53
= {
707 .cm_ofs
= 0x06000000,
708 .cpmem_ofs
= 0x07000000,
709 .srm_ofs
= 0x07040000,
710 .tpm_ofs
= 0x07060000,
711 .disp0_ofs
= 0x06040000,
712 .disp1_ofs
= 0x06048000,
713 .dc_tmpl_ofs
= 0x07080000,
714 .vdi_ofs
= 0x06068000,
718 static struct ipu_devtype ipu_type_imx6q
= {
720 .cm_ofs
= 0x00200000,
721 .cpmem_ofs
= 0x00300000,
722 .srm_ofs
= 0x00340000,
723 .tpm_ofs
= 0x00360000,
724 .disp0_ofs
= 0x00240000,
725 .disp1_ofs
= 0x00248000,
726 .dc_tmpl_ofs
= 0x00380000,
727 .vdi_ofs
= 0x00268000,
731 static const struct of_device_id imx_ipu_dt_ids
[] = {
732 { .compatible
= "fsl,imx51-ipu", .data
= &ipu_type_imx51
, },
733 { .compatible
= "fsl,imx53-ipu", .data
= &ipu_type_imx53
, },
734 { .compatible
= "fsl,imx6q-ipu", .data
= &ipu_type_imx6q
, },
737 MODULE_DEVICE_TABLE(of
, imx_ipu_dt_ids
);
739 static int ipu_submodules_init(struct ipu_soc
*ipu
,
740 struct platform_device
*pdev
, unsigned long ipu_base
,
745 struct device
*dev
= &pdev
->dev
;
746 const struct ipu_devtype
*devtype
= ipu
->devtype
;
748 ret
= ipu_di_init(ipu
, dev
, 0, ipu_base
+ devtype
->disp0_ofs
,
749 IPU_CONF_DI0_EN
, ipu_clk
);
755 ret
= ipu_di_init(ipu
, dev
, 1, ipu_base
+ devtype
->disp1_ofs
,
756 IPU_CONF_DI1_EN
, ipu_clk
);
762 ret
= ipu_dc_init(ipu
, dev
, ipu_base
+ devtype
->cm_ofs
+
763 IPU_CM_DC_REG_OFS
, ipu_base
+ devtype
->dc_tmpl_ofs
);
765 unit
= "dc_template";
769 ret
= ipu_dmfc_init(ipu
, dev
, ipu_base
+
770 devtype
->cm_ofs
+ IPU_CM_DMFC_REG_OFS
, ipu_clk
);
776 ret
= ipu_dp_init(ipu
, dev
, ipu_base
+ devtype
->srm_ofs
);
793 dev_err(&pdev
->dev
, "init %s failed with %d\n", unit
, ret
);
797 static void ipu_irq_handle(struct ipu_soc
*ipu
, const int *regs
, int num_regs
)
799 unsigned long status
;
800 int i
, bit
, irq_base
;
802 for (i
= 0; i
< num_regs
; i
++) {
804 status
= ipu_cm_read(ipu
, IPU_INT_STAT(regs
[i
]));
805 status
&= ipu_cm_read(ipu
, IPU_INT_CTRL(regs
[i
]));
807 irq_base
= ipu
->irq_start
+ regs
[i
] * 32;
808 for_each_set_bit(bit
, &status
, 32)
809 generic_handle_irq(irq_base
+ bit
);
/* Chained handler for the IPU sync interrupt line. */
static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);

	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

	chained_irq_exit(chip, desc);
}
/* Chained handler for the IPU error interrupt line. */
static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	const int int_reg[] = { 4, 5, 8, 9};
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);

	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

	chained_irq_exit(chip, desc);
}
839 static void ipu_ack_irq(struct irq_data
*d
)
841 struct ipu_soc
*ipu
= irq_data_get_irq_chip_data(d
);
842 unsigned int irq
= d
->irq
- ipu
->irq_start
;
844 ipu_cm_write(ipu
, 1 << (irq
% 32), IPU_INT_STAT(irq
/ 32));
847 static void ipu_unmask_irq(struct irq_data
*d
)
849 struct ipu_soc
*ipu
= irq_data_get_irq_chip_data(d
);
850 unsigned int irq
= d
->irq
- ipu
->irq_start
;
854 spin_lock_irqsave(&ipu
->lock
, flags
);
856 reg
= ipu_cm_read(ipu
, IPU_INT_CTRL(irq
/ 32));
857 reg
|= 1 << (irq
% 32);
858 ipu_cm_write(ipu
, reg
, IPU_INT_CTRL(irq
/ 32));
860 spin_unlock_irqrestore(&ipu
->lock
, flags
);
863 static void ipu_mask_irq(struct irq_data
*d
)
865 struct ipu_soc
*ipu
= irq_data_get_irq_chip_data(d
);
866 unsigned int irq
= d
->irq
- ipu
->irq_start
;
870 spin_lock_irqsave(&ipu
->lock
, flags
);
872 reg
= ipu_cm_read(ipu
, IPU_INT_CTRL(irq
/ 32));
873 reg
&= ~(1 << (irq
% 32));
874 ipu_cm_write(ipu
, reg
, IPU_INT_CTRL(irq
/ 32));
876 spin_unlock_irqrestore(&ipu
->lock
, flags
);
879 static struct irq_chip ipu_irq_chip
= {
881 .irq_ack
= ipu_ack_irq
,
882 .irq_mask
= ipu_mask_irq
,
883 .irq_unmask
= ipu_unmask_irq
,
886 int ipu_idmac_channel_irq(struct ipu_soc
*ipu
, struct ipuv3_channel
*channel
,
887 enum ipu_channel_irq irq_type
)
889 return ipu
->irq_start
+ irq_type
+ channel
->num
;
891 EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq
);
/*
 * Tear down the IPU submodules in reverse order of ipu_submodules_init().
 * The body was lost in extraction; restored from the init order — confirm
 * against upstream.
 */
static void ipu_submodules_exit(struct ipu_soc *ipu)
{
	ipu_dp_exit(ipu);
	ipu_dmfc_exit(ipu);
	ipu_dc_exit(ipu);
	ipu_di_exit(ipu, 1);
	ipu_di_exit(ipu, 0);
}
/* device_for_each_child() callback: unregister one child platform device. */
static int platform_remove_devices_fn(struct device *dev, void *unused)
{
	struct platform_device *pdev = to_platform_device(dev);

	platform_device_unregister(pdev);

	return 0;
}
911 static void platform_device_unregister_children(struct platform_device
*pdev
)
913 device_for_each_child(&pdev
->dev
, NULL
, platform_remove_devices_fn
);
916 struct ipu_platform_reg
{
917 struct ipu_client_platformdata pdata
;
921 static const struct ipu_platform_reg client_reg
[] = {
926 .dp
= IPU_DP_FLOW_SYNC_BG
,
927 .dma
[0] = IPUV3_CHANNEL_MEM_BG_SYNC
,
930 .name
= "imx-ipuv3-crtc",
936 .dma
[0] = IPUV3_CHANNEL_MEM_DC_SYNC
,
939 .name
= "imx-ipuv3-crtc",
/* Monotonic id handed to each registered client platform device. */
static int ipu_client_id;
945 static int ipu_add_subdevice_pdata(struct device
*dev
,
946 const struct ipu_platform_reg
*reg
)
948 struct platform_device
*pdev
;
950 pdev
= platform_device_register_data(dev
, reg
->name
, ipu_client_id
++,
951 ®
->pdata
, sizeof(struct ipu_platform_reg
));
953 return pdev
? 0 : -EINVAL
;
956 static int ipu_add_client_devices(struct ipu_soc
*ipu
)
961 for (i
= 0; i
< ARRAY_SIZE(client_reg
); i
++) {
962 const struct ipu_platform_reg
*reg
= &client_reg
[i
];
963 ret
= ipu_add_subdevice_pdata(ipu
->dev
, reg
);
971 platform_device_unregister_children(to_platform_device(ipu
->dev
));
976 static int ipu_irq_init(struct ipu_soc
*ipu
)
980 ipu
->irq_start
= irq_alloc_descs(-1, 0, IPU_NUM_IRQS
, 0);
981 if (ipu
->irq_start
< 0)
982 return ipu
->irq_start
;
984 for (i
= ipu
->irq_start
; i
< ipu
->irq_start
+ IPU_NUM_IRQS
; i
++) {
985 irq_set_chip_and_handler(i
, &ipu_irq_chip
, handle_level_irq
);
986 set_irq_flags(i
, IRQF_VALID
);
987 irq_set_chip_data(i
, ipu
);
990 irq_set_chained_handler(ipu
->irq_sync
, ipu_irq_handler
);
991 irq_set_handler_data(ipu
->irq_sync
, ipu
);
992 irq_set_chained_handler(ipu
->irq_err
, ipu_err_irq_handler
);
993 irq_set_handler_data(ipu
->irq_err
, ipu
);
998 static void ipu_irq_exit(struct ipu_soc
*ipu
)
1002 irq_set_chained_handler(ipu
->irq_err
, NULL
);
1003 irq_set_handler_data(ipu
->irq_err
, NULL
);
1004 irq_set_chained_handler(ipu
->irq_sync
, NULL
);
1005 irq_set_handler_data(ipu
->irq_sync
, NULL
);
1007 for (i
= ipu
->irq_start
; i
< ipu
->irq_start
+ IPU_NUM_IRQS
; i
++) {
1008 set_irq_flags(i
, 0);
1009 irq_set_chip(i
, NULL
);
1010 irq_set_chip_data(i
, NULL
);
1013 irq_free_descs(ipu
->irq_start
, IPU_NUM_IRQS
);
1016 static int ipu_probe(struct platform_device
*pdev
)
1018 const struct of_device_id
*of_id
=
1019 of_match_device(imx_ipu_dt_ids
, &pdev
->dev
);
1020 struct ipu_soc
*ipu
;
1021 struct resource
*res
;
1022 unsigned long ipu_base
;
1023 int i
, ret
, irq_sync
, irq_err
;
1024 const struct ipu_devtype
*devtype
;
1026 devtype
= of_id
->data
;
1028 irq_sync
= platform_get_irq(pdev
, 0);
1029 irq_err
= platform_get_irq(pdev
, 1);
1030 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1032 dev_dbg(&pdev
->dev
, "irq_sync: %d irq_err: %d\n",
1035 if (!res
|| irq_sync
< 0 || irq_err
< 0)
1038 ipu_base
= res
->start
;
1040 ipu
= devm_kzalloc(&pdev
->dev
, sizeof(*ipu
), GFP_KERNEL
);
1044 for (i
= 0; i
< 64; i
++)
1045 ipu
->channel
[i
].ipu
= ipu
;
1046 ipu
->devtype
= devtype
;
1047 ipu
->ipu_type
= devtype
->type
;
1049 spin_lock_init(&ipu
->lock
);
1050 mutex_init(&ipu
->channel_lock
);
1052 dev_dbg(&pdev
->dev
, "cm_reg: 0x%08lx\n",
1053 ipu_base
+ devtype
->cm_ofs
);
1054 dev_dbg(&pdev
->dev
, "idmac: 0x%08lx\n",
1055 ipu_base
+ devtype
->cm_ofs
+ IPU_CM_IDMAC_REG_OFS
);
1056 dev_dbg(&pdev
->dev
, "cpmem: 0x%08lx\n",
1057 ipu_base
+ devtype
->cpmem_ofs
);
1058 dev_dbg(&pdev
->dev
, "disp0: 0x%08lx\n",
1059 ipu_base
+ devtype
->disp0_ofs
);
1060 dev_dbg(&pdev
->dev
, "disp1: 0x%08lx\n",
1061 ipu_base
+ devtype
->disp1_ofs
);
1062 dev_dbg(&pdev
->dev
, "srm: 0x%08lx\n",
1063 ipu_base
+ devtype
->srm_ofs
);
1064 dev_dbg(&pdev
->dev
, "tpm: 0x%08lx\n",
1065 ipu_base
+ devtype
->tpm_ofs
);
1066 dev_dbg(&pdev
->dev
, "dc: 0x%08lx\n",
1067 ipu_base
+ devtype
->cm_ofs
+ IPU_CM_DC_REG_OFS
);
1068 dev_dbg(&pdev
->dev
, "ic: 0x%08lx\n",
1069 ipu_base
+ devtype
->cm_ofs
+ IPU_CM_IC_REG_OFS
);
1070 dev_dbg(&pdev
->dev
, "dmfc: 0x%08lx\n",
1071 ipu_base
+ devtype
->cm_ofs
+ IPU_CM_DMFC_REG_OFS
);
1072 dev_dbg(&pdev
->dev
, "vdi: 0x%08lx\n",
1073 ipu_base
+ devtype
->vdi_ofs
);
1075 ipu
->cm_reg
= devm_ioremap(&pdev
->dev
,
1076 ipu_base
+ devtype
->cm_ofs
, PAGE_SIZE
);
1077 ipu
->idmac_reg
= devm_ioremap(&pdev
->dev
,
1078 ipu_base
+ devtype
->cm_ofs
+ IPU_CM_IDMAC_REG_OFS
,
1080 ipu
->cpmem_base
= devm_ioremap(&pdev
->dev
,
1081 ipu_base
+ devtype
->cpmem_ofs
, PAGE_SIZE
);
1083 if (!ipu
->cm_reg
|| !ipu
->idmac_reg
|| !ipu
->cpmem_base
) {
1085 goto failed_ioremap
;
1088 ipu
->clk
= devm_clk_get(&pdev
->dev
, "bus");
1089 if (IS_ERR(ipu
->clk
)) {
1090 ret
= PTR_ERR(ipu
->clk
);
1091 dev_err(&pdev
->dev
, "clk_get failed with %d", ret
);
1092 goto failed_clk_get
;
1095 platform_set_drvdata(pdev
, ipu
);
1097 clk_prepare_enable(ipu
->clk
);
1099 ipu
->dev
= &pdev
->dev
;
1100 ipu
->irq_sync
= irq_sync
;
1101 ipu
->irq_err
= irq_err
;
1103 ret
= ipu_irq_init(ipu
);
1105 goto out_failed_irq
;
1107 ret
= ipu_reset(ipu
);
1109 goto out_failed_reset
;
1111 /* Set MCU_T to divide MCU access window into 2 */
1112 ipu_cm_write(ipu
, 0x00400000L
| (IPU_MCU_T_DEFAULT
<< 18),
1115 ret
= ipu_submodules_init(ipu
, pdev
, ipu_base
, ipu
->clk
);
1117 goto failed_submodules_init
;
1119 ret
= ipu_add_client_devices(ipu
);
1121 dev_err(&pdev
->dev
, "adding client devices failed with %d\n",
1123 goto failed_add_clients
;
1126 dev_info(&pdev
->dev
, "%s probed\n", devtype
->name
);
1131 ipu_submodules_exit(ipu
);
1132 failed_submodules_init
:
1136 clk_disable_unprepare(ipu
->clk
);
1142 static int ipu_remove(struct platform_device
*pdev
)
1144 struct ipu_soc
*ipu
= platform_get_drvdata(pdev
);
1146 platform_device_unregister_children(pdev
);
1147 ipu_submodules_exit(ipu
);
1150 clk_disable_unprepare(ipu
->clk
);
1155 static struct platform_driver imx_ipu_driver
= {
1157 .name
= "imx-ipuv3",
1158 .of_match_table
= imx_ipu_dt_ids
,
1161 .remove
= ipu_remove
,
1164 module_platform_driver(imx_ipu_driver
);
1166 MODULE_DESCRIPTION("i.MX IPU v3 driver");
1167 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
1168 MODULE_LICENSE("GPL");