2 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
3 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 #include <linux/module.h>
16 #include <linux/export.h>
17 #include <linux/types.h>
18 #include <linux/init.h>
19 #include <linux/reset.h>
20 #include <linux/platform_device.h>
21 #include <linux/err.h>
22 #include <linux/spinlock.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
26 #include <linux/clk.h>
27 #include <linux/list.h>
28 #include <linux/irq.h>
29 #include <linux/irqchip/chained_irq.h>
30 #include <linux/irqdomain.h>
31 #include <linux/of_device.h>
33 #include "imx-ipu-v3.h"
36 static inline u32
ipu_cm_read(struct ipu_soc
*ipu
, unsigned offset
)
38 return readl(ipu
->cm_reg
+ offset
);
41 static inline void ipu_cm_write(struct ipu_soc
*ipu
, u32 value
, unsigned offset
)
43 writel(value
, ipu
->cm_reg
+ offset
);
46 static inline u32
ipu_idmac_read(struct ipu_soc
*ipu
, unsigned offset
)
48 return readl(ipu
->idmac_reg
+ offset
);
51 static inline void ipu_idmac_write(struct ipu_soc
*ipu
, u32 value
,
54 writel(value
, ipu
->idmac_reg
+ offset
);
57 void ipu_srm_dp_sync_update(struct ipu_soc
*ipu
)
61 val
= ipu_cm_read(ipu
, IPU_SRM_PRI2
);
63 ipu_cm_write(ipu
, val
, IPU_SRM_PRI2
);
65 EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update
);
67 struct ipu_ch_param __iomem
*ipu_get_cpmem(struct ipuv3_channel
*channel
)
69 struct ipu_soc
*ipu
= channel
->ipu
;
71 return ipu
->cpmem_base
+ channel
->num
;
73 EXPORT_SYMBOL_GPL(ipu_get_cpmem
);
75 void ipu_cpmem_set_high_priority(struct ipuv3_channel
*channel
)
77 struct ipu_soc
*ipu
= channel
->ipu
;
78 struct ipu_ch_param __iomem
*p
= ipu_get_cpmem(channel
);
81 if (ipu
->ipu_type
== IPUV3EX
)
82 ipu_ch_param_write_field(p
, IPU_FIELD_ID
, 1);
84 val
= ipu_idmac_read(ipu
, IDMAC_CHA_PRI(channel
->num
));
85 val
|= 1 << (channel
->num
% 32);
86 ipu_idmac_write(ipu
, val
, IDMAC_CHA_PRI(channel
->num
));
88 EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority
);
90 void ipu_ch_param_write_field(struct ipu_ch_param __iomem
*base
, u32 wbs
, u32 v
)
92 u32 bit
= (wbs
>> 8) % 160;
93 u32 size
= wbs
& 0xff;
94 u32 word
= (wbs
>> 8) / 160;
97 u32 mask
= (1 << size
) - 1;
100 pr_debug("%s %d %d %d\n", __func__
, word
, bit
, size
);
102 val
= readl(&base
->word
[word
].data
[i
]);
103 val
&= ~(mask
<< ofs
);
105 writel(val
, &base
->word
[word
].data
[i
]);
107 if ((bit
+ size
- 1) / 32 > i
) {
108 val
= readl(&base
->word
[word
].data
[i
+ 1]);
109 val
&= ~(mask
>> (ofs
? (32 - ofs
) : 0));
110 val
|= v
>> (ofs
? (32 - ofs
) : 0);
111 writel(val
, &base
->word
[word
].data
[i
+ 1]);
114 EXPORT_SYMBOL_GPL(ipu_ch_param_write_field
);
116 u32
ipu_ch_param_read_field(struct ipu_ch_param __iomem
*base
, u32 wbs
)
118 u32 bit
= (wbs
>> 8) % 160;
119 u32 size
= wbs
& 0xff;
120 u32 word
= (wbs
>> 8) / 160;
123 u32 mask
= (1 << size
) - 1;
126 pr_debug("%s %d %d %d\n", __func__
, word
, bit
, size
);
128 val
= (readl(&base
->word
[word
].data
[i
]) >> ofs
) & mask
;
130 if ((bit
+ size
- 1) / 32 > i
) {
132 tmp
= readl(&base
->word
[word
].data
[i
+ 1]);
133 tmp
&= mask
>> (ofs
? (32 - ofs
) : 0);
134 val
|= tmp
<< (ofs
? (32 - ofs
) : 0);
139 EXPORT_SYMBOL_GPL(ipu_ch_param_read_field
);
141 int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem
*p
,
144 int bpp
= 0, npb
= 0, ro
, go
, bo
, to
;
146 ro
= rgb
->bits_per_pixel
- rgb
->red
.length
- rgb
->red
.offset
;
147 go
= rgb
->bits_per_pixel
- rgb
->green
.length
- rgb
->green
.offset
;
148 bo
= rgb
->bits_per_pixel
- rgb
->blue
.length
- rgb
->blue
.offset
;
149 to
= rgb
->bits_per_pixel
- rgb
->transp
.length
- rgb
->transp
.offset
;
151 ipu_ch_param_write_field(p
, IPU_FIELD_WID0
, rgb
->red
.length
- 1);
152 ipu_ch_param_write_field(p
, IPU_FIELD_OFS0
, ro
);
153 ipu_ch_param_write_field(p
, IPU_FIELD_WID1
, rgb
->green
.length
- 1);
154 ipu_ch_param_write_field(p
, IPU_FIELD_OFS1
, go
);
155 ipu_ch_param_write_field(p
, IPU_FIELD_WID2
, rgb
->blue
.length
- 1);
156 ipu_ch_param_write_field(p
, IPU_FIELD_OFS2
, bo
);
158 if (rgb
->transp
.length
) {
159 ipu_ch_param_write_field(p
, IPU_FIELD_WID3
,
160 rgb
->transp
.length
- 1);
161 ipu_ch_param_write_field(p
, IPU_FIELD_OFS3
, to
);
163 ipu_ch_param_write_field(p
, IPU_FIELD_WID3
, 7);
164 ipu_ch_param_write_field(p
, IPU_FIELD_OFS3
,
165 rgb
->bits_per_pixel
);
168 switch (rgb
->bits_per_pixel
) {
188 ipu_ch_param_write_field(p
, IPU_FIELD_BPP
, bpp
);
189 ipu_ch_param_write_field(p
, IPU_FIELD_NPB
, npb
);
190 ipu_ch_param_write_field(p
, IPU_FIELD_PFS
, 7); /* rgb mode */
194 EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb
);
196 int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem
*p
,
199 int bpp
= 0, npb
= 0;
222 ipu_ch_param_write_field(p
, IPU_FIELD_BPP
, bpp
);
223 ipu_ch_param_write_field(p
, IPU_FIELD_NPB
, npb
);
224 ipu_ch_param_write_field(p
, IPU_FIELD_PFS
, 6); /* raw mode */
228 EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough
);
230 void ipu_cpmem_set_yuv_interleaved(struct ipu_ch_param __iomem
*p
,
233 switch (pixel_format
) {
234 case V4L2_PIX_FMT_UYVY
:
235 ipu_ch_param_write_field(p
, IPU_FIELD_BPP
, 3); /* bits/pixel */
236 ipu_ch_param_write_field(p
, IPU_FIELD_PFS
, 0xA); /* pix format */
237 ipu_ch_param_write_field(p
, IPU_FIELD_NPB
, 31); /* burst size */
239 case V4L2_PIX_FMT_YUYV
:
240 ipu_ch_param_write_field(p
, IPU_FIELD_BPP
, 3); /* bits/pixel */
241 ipu_ch_param_write_field(p
, IPU_FIELD_PFS
, 0x8); /* pix format */
242 ipu_ch_param_write_field(p
, IPU_FIELD_NPB
, 31); /* burst size */
246 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved
);
248 void ipu_cpmem_set_yuv_planar_full(struct ipu_ch_param __iomem
*p
,
249 u32 pixel_format
, int stride
, int u_offset
, int v_offset
)
251 switch (pixel_format
) {
252 case V4L2_PIX_FMT_YUV420
:
253 ipu_ch_param_write_field(p
, IPU_FIELD_SLUV
, (stride
/ 2) - 1);
254 ipu_ch_param_write_field(p
, IPU_FIELD_UBO
, u_offset
/ 8);
255 ipu_ch_param_write_field(p
, IPU_FIELD_VBO
, v_offset
/ 8);
257 case V4L2_PIX_FMT_YVU420
:
258 ipu_ch_param_write_field(p
, IPU_FIELD_SLUV
, (stride
/ 2) - 1);
259 ipu_ch_param_write_field(p
, IPU_FIELD_UBO
, v_offset
/ 8);
260 ipu_ch_param_write_field(p
, IPU_FIELD_VBO
, u_offset
/ 8);
264 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full
);
266 void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem
*p
, u32 pixel_format
,
267 int stride
, int height
)
269 int u_offset
, v_offset
;
272 switch (pixel_format
) {
273 case V4L2_PIX_FMT_YUV420
:
274 case V4L2_PIX_FMT_YVU420
:
275 uv_stride
= stride
/ 2;
276 u_offset
= stride
* height
;
277 v_offset
= u_offset
+ (uv_stride
* height
/ 2);
278 ipu_cpmem_set_yuv_planar_full(p
, pixel_format
, stride
,
283 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar
);
285 static struct ipu_rgb def_rgb_32
= {
286 .red
= { .offset
= 16, .length
= 8, },
287 .green
= { .offset
= 8, .length
= 8, },
288 .blue
= { .offset
= 0, .length
= 8, },
289 .transp
= { .offset
= 24, .length
= 8, },
290 .bits_per_pixel
= 32,
293 static struct ipu_rgb def_bgr_32
= {
294 .red
= { .offset
= 16, .length
= 8, },
295 .green
= { .offset
= 8, .length
= 8, },
296 .blue
= { .offset
= 0, .length
= 8, },
297 .transp
= { .offset
= 24, .length
= 8, },
298 .bits_per_pixel
= 32,
301 static struct ipu_rgb def_rgb_24
= {
302 .red
= { .offset
= 0, .length
= 8, },
303 .green
= { .offset
= 8, .length
= 8, },
304 .blue
= { .offset
= 16, .length
= 8, },
305 .transp
= { .offset
= 0, .length
= 0, },
306 .bits_per_pixel
= 24,
309 static struct ipu_rgb def_bgr_24
= {
310 .red
= { .offset
= 16, .length
= 8, },
311 .green
= { .offset
= 8, .length
= 8, },
312 .blue
= { .offset
= 0, .length
= 8, },
313 .transp
= { .offset
= 0, .length
= 0, },
314 .bits_per_pixel
= 24,
317 static struct ipu_rgb def_rgb_16
= {
318 .red
= { .offset
= 11, .length
= 5, },
319 .green
= { .offset
= 5, .length
= 6, },
320 .blue
= { .offset
= 0, .length
= 5, },
321 .transp
= { .offset
= 0, .length
= 0, },
322 .bits_per_pixel
= 16,
325 #define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
326 #define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
327 (pix->width * (y) / 4) + (x) / 2)
328 #define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
329 (pix->width * pix->height / 4) + \
330 (pix->width * (y) / 4) + (x) / 2)
332 int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem
*cpmem
, u32 pixelformat
)
334 switch (pixelformat
) {
335 case V4L2_PIX_FMT_YUV420
:
336 case V4L2_PIX_FMT_YVU420
:
338 ipu_ch_param_write_field(cpmem
, IPU_FIELD_PFS
, 2);
340 ipu_ch_param_write_field(cpmem
, IPU_FIELD_NPB
, 63);
342 case V4L2_PIX_FMT_UYVY
:
344 ipu_ch_param_write_field(cpmem
, IPU_FIELD_BPP
, 3);
346 ipu_ch_param_write_field(cpmem
, IPU_FIELD_PFS
, 0xA);
348 ipu_ch_param_write_field(cpmem
, IPU_FIELD_NPB
, 31);
350 case V4L2_PIX_FMT_YUYV
:
352 ipu_ch_param_write_field(cpmem
, IPU_FIELD_BPP
, 3);
354 ipu_ch_param_write_field(cpmem
, IPU_FIELD_PFS
, 0x8);
356 ipu_ch_param_write_field(cpmem
, IPU_FIELD_NPB
, 31);
358 case V4L2_PIX_FMT_RGB32
:
359 ipu_cpmem_set_format_rgb(cpmem
, &def_rgb_32
);
361 case V4L2_PIX_FMT_RGB565
:
362 ipu_cpmem_set_format_rgb(cpmem
, &def_rgb_16
);
364 case V4L2_PIX_FMT_BGR32
:
365 ipu_cpmem_set_format_rgb(cpmem
, &def_bgr_32
);
367 case V4L2_PIX_FMT_RGB24
:
368 ipu_cpmem_set_format_rgb(cpmem
, &def_rgb_24
);
370 case V4L2_PIX_FMT_BGR24
:
371 ipu_cpmem_set_format_rgb(cpmem
, &def_bgr_24
);
379 EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt
);
381 int ipu_cpmem_set_image(struct ipu_ch_param __iomem
*cpmem
,
382 struct ipu_image
*image
)
384 struct v4l2_pix_format
*pix
= &image
->pix
;
385 int y_offset
, u_offset
, v_offset
;
387 pr_debug("%s: resolution: %dx%d stride: %d\n",
388 __func__
, pix
->width
, pix
->height
,
391 ipu_cpmem_set_resolution(cpmem
, image
->rect
.width
,
393 ipu_cpmem_set_stride(cpmem
, pix
->bytesperline
);
395 ipu_cpmem_set_fmt(cpmem
, pix
->pixelformat
);
397 switch (pix
->pixelformat
) {
398 case V4L2_PIX_FMT_YUV420
:
399 case V4L2_PIX_FMT_YVU420
:
400 y_offset
= Y_OFFSET(pix
, image
->rect
.left
, image
->rect
.top
);
401 u_offset
= U_OFFSET(pix
, image
->rect
.left
,
402 image
->rect
.top
) - y_offset
;
403 v_offset
= V_OFFSET(pix
, image
->rect
.left
,
404 image
->rect
.top
) - y_offset
;
406 ipu_cpmem_set_yuv_planar_full(cpmem
, pix
->pixelformat
,
407 pix
->bytesperline
, u_offset
, v_offset
);
408 ipu_cpmem_set_buffer(cpmem
, 0, image
->phys
+ y_offset
);
410 case V4L2_PIX_FMT_UYVY
:
411 case V4L2_PIX_FMT_YUYV
:
412 ipu_cpmem_set_buffer(cpmem
, 0, image
->phys
+
413 image
->rect
.left
* 2 +
414 image
->rect
.top
* image
->pix
.bytesperline
);
416 case V4L2_PIX_FMT_RGB32
:
417 case V4L2_PIX_FMT_BGR32
:
418 ipu_cpmem_set_buffer(cpmem
, 0, image
->phys
+
419 image
->rect
.left
* 4 +
420 image
->rect
.top
* image
->pix
.bytesperline
);
422 case V4L2_PIX_FMT_RGB565
:
423 ipu_cpmem_set_buffer(cpmem
, 0, image
->phys
+
424 image
->rect
.left
* 2 +
425 image
->rect
.top
* image
->pix
.bytesperline
);
427 case V4L2_PIX_FMT_RGB24
:
428 case V4L2_PIX_FMT_BGR24
:
429 ipu_cpmem_set_buffer(cpmem
, 0, image
->phys
+
430 image
->rect
.left
* 3 +
431 image
->rect
.top
* image
->pix
.bytesperline
);
439 EXPORT_SYMBOL_GPL(ipu_cpmem_set_image
);
441 enum ipu_color_space
ipu_pixelformat_to_colorspace(u32 pixelformat
)
443 switch (pixelformat
) {
444 case V4L2_PIX_FMT_YUV420
:
445 case V4L2_PIX_FMT_YVU420
:
446 case V4L2_PIX_FMT_UYVY
:
447 case V4L2_PIX_FMT_YUYV
:
448 return IPUV3_COLORSPACE_YUV
;
449 case V4L2_PIX_FMT_RGB32
:
450 case V4L2_PIX_FMT_BGR32
:
451 case V4L2_PIX_FMT_RGB24
:
452 case V4L2_PIX_FMT_BGR24
:
453 case V4L2_PIX_FMT_RGB565
:
454 return IPUV3_COLORSPACE_RGB
;
456 return IPUV3_COLORSPACE_UNKNOWN
;
459 EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace
);
461 struct ipuv3_channel
*ipu_idmac_get(struct ipu_soc
*ipu
, unsigned num
)
463 struct ipuv3_channel
*channel
;
465 dev_dbg(ipu
->dev
, "%s %d\n", __func__
, num
);
468 return ERR_PTR(-ENODEV
);
470 mutex_lock(&ipu
->channel_lock
);
472 channel
= &ipu
->channel
[num
];
475 channel
= ERR_PTR(-EBUSY
);
483 mutex_unlock(&ipu
->channel_lock
);
487 EXPORT_SYMBOL_GPL(ipu_idmac_get
);
489 void ipu_idmac_put(struct ipuv3_channel
*channel
)
491 struct ipu_soc
*ipu
= channel
->ipu
;
493 dev_dbg(ipu
->dev
, "%s %d\n", __func__
, channel
->num
);
495 mutex_lock(&ipu
->channel_lock
);
499 mutex_unlock(&ipu
->channel_lock
);
501 EXPORT_SYMBOL_GPL(ipu_idmac_put
);
503 #define idma_mask(ch) (1 << (ch & 0x1f))
505 void ipu_idmac_set_double_buffer(struct ipuv3_channel
*channel
,
508 struct ipu_soc
*ipu
= channel
->ipu
;
512 spin_lock_irqsave(&ipu
->lock
, flags
);
514 reg
= ipu_cm_read(ipu
, IPU_CHA_DB_MODE_SEL(channel
->num
));
516 reg
|= idma_mask(channel
->num
);
518 reg
&= ~idma_mask(channel
->num
);
519 ipu_cm_write(ipu
, reg
, IPU_CHA_DB_MODE_SEL(channel
->num
));
521 spin_unlock_irqrestore(&ipu
->lock
, flags
);
523 EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer
);
525 int ipu_module_enable(struct ipu_soc
*ipu
, u32 mask
)
527 unsigned long lock_flags
;
530 spin_lock_irqsave(&ipu
->lock
, lock_flags
);
532 val
= ipu_cm_read(ipu
, IPU_DISP_GEN
);
534 if (mask
& IPU_CONF_DI0_EN
)
535 val
|= IPU_DI0_COUNTER_RELEASE
;
536 if (mask
& IPU_CONF_DI1_EN
)
537 val
|= IPU_DI1_COUNTER_RELEASE
;
539 ipu_cm_write(ipu
, val
, IPU_DISP_GEN
);
541 val
= ipu_cm_read(ipu
, IPU_CONF
);
543 ipu_cm_write(ipu
, val
, IPU_CONF
);
545 spin_unlock_irqrestore(&ipu
->lock
, lock_flags
);
549 EXPORT_SYMBOL_GPL(ipu_module_enable
);
551 int ipu_module_disable(struct ipu_soc
*ipu
, u32 mask
)
553 unsigned long lock_flags
;
556 spin_lock_irqsave(&ipu
->lock
, lock_flags
);
558 val
= ipu_cm_read(ipu
, IPU_CONF
);
560 ipu_cm_write(ipu
, val
, IPU_CONF
);
562 val
= ipu_cm_read(ipu
, IPU_DISP_GEN
);
564 if (mask
& IPU_CONF_DI0_EN
)
565 val
&= ~IPU_DI0_COUNTER_RELEASE
;
566 if (mask
& IPU_CONF_DI1_EN
)
567 val
&= ~IPU_DI1_COUNTER_RELEASE
;
569 ipu_cm_write(ipu
, val
, IPU_DISP_GEN
);
571 spin_unlock_irqrestore(&ipu
->lock
, lock_flags
);
575 EXPORT_SYMBOL_GPL(ipu_module_disable
);
577 void ipu_idmac_select_buffer(struct ipuv3_channel
*channel
, u32 buf_num
)
579 struct ipu_soc
*ipu
= channel
->ipu
;
580 unsigned int chno
= channel
->num
;
583 spin_lock_irqsave(&ipu
->lock
, flags
);
585 /* Mark buffer as ready. */
587 ipu_cm_write(ipu
, idma_mask(chno
), IPU_CHA_BUF0_RDY(chno
));
589 ipu_cm_write(ipu
, idma_mask(chno
), IPU_CHA_BUF1_RDY(chno
));
591 spin_unlock_irqrestore(&ipu
->lock
, flags
);
593 EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer
);
595 int ipu_idmac_enable_channel(struct ipuv3_channel
*channel
)
597 struct ipu_soc
*ipu
= channel
->ipu
;
601 spin_lock_irqsave(&ipu
->lock
, flags
);
603 val
= ipu_idmac_read(ipu
, IDMAC_CHA_EN(channel
->num
));
604 val
|= idma_mask(channel
->num
);
605 ipu_idmac_write(ipu
, val
, IDMAC_CHA_EN(channel
->num
));
607 spin_unlock_irqrestore(&ipu
->lock
, flags
);
611 EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel
);
613 int ipu_idmac_disable_channel(struct ipuv3_channel
*channel
)
615 struct ipu_soc
*ipu
= channel
->ipu
;
618 unsigned long timeout
;
620 timeout
= jiffies
+ msecs_to_jiffies(50);
621 while (ipu_idmac_read(ipu
, IDMAC_CHA_BUSY(channel
->num
)) &
622 idma_mask(channel
->num
)) {
623 if (time_after(jiffies
, timeout
)) {
624 dev_warn(ipu
->dev
, "disabling busy idmac channel %d\n",
631 spin_lock_irqsave(&ipu
->lock
, flags
);
633 /* Disable DMA channel(s) */
634 val
= ipu_idmac_read(ipu
, IDMAC_CHA_EN(channel
->num
));
635 val
&= ~idma_mask(channel
->num
);
636 ipu_idmac_write(ipu
, val
, IDMAC_CHA_EN(channel
->num
));
638 /* Set channel buffers NOT to be ready */
639 ipu_cm_write(ipu
, 0xf0000000, IPU_GPR
); /* write one to clear */
641 if (ipu_cm_read(ipu
, IPU_CHA_BUF0_RDY(channel
->num
)) &
642 idma_mask(channel
->num
)) {
643 ipu_cm_write(ipu
, idma_mask(channel
->num
),
644 IPU_CHA_BUF0_RDY(channel
->num
));
647 if (ipu_cm_read(ipu
, IPU_CHA_BUF1_RDY(channel
->num
)) &
648 idma_mask(channel
->num
)) {
649 ipu_cm_write(ipu
, idma_mask(channel
->num
),
650 IPU_CHA_BUF1_RDY(channel
->num
));
653 ipu_cm_write(ipu
, 0x0, IPU_GPR
); /* write one to set */
655 /* Reset the double buffer */
656 val
= ipu_cm_read(ipu
, IPU_CHA_DB_MODE_SEL(channel
->num
));
657 val
&= ~idma_mask(channel
->num
);
658 ipu_cm_write(ipu
, val
, IPU_CHA_DB_MODE_SEL(channel
->num
));
660 spin_unlock_irqrestore(&ipu
->lock
, flags
);
664 EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel
);
666 static int ipu_memory_reset(struct ipu_soc
*ipu
)
668 unsigned long timeout
;
670 ipu_cm_write(ipu
, 0x807FFFFF, IPU_MEM_RST
);
672 timeout
= jiffies
+ msecs_to_jiffies(1000);
673 while (ipu_cm_read(ipu
, IPU_MEM_RST
) & 0x80000000) {
674 if (time_after(jiffies
, timeout
))
684 unsigned long cm_ofs
;
685 unsigned long cpmem_ofs
;
686 unsigned long srm_ofs
;
687 unsigned long tpm_ofs
;
688 unsigned long disp0_ofs
;
689 unsigned long disp1_ofs
;
690 unsigned long dc_tmpl_ofs
;
691 unsigned long vdi_ofs
;
692 enum ipuv3_type type
;
695 static struct ipu_devtype ipu_type_imx51
= {
697 .cm_ofs
= 0x1e000000,
698 .cpmem_ofs
= 0x1f000000,
699 .srm_ofs
= 0x1f040000,
700 .tpm_ofs
= 0x1f060000,
701 .disp0_ofs
= 0x1e040000,
702 .disp1_ofs
= 0x1e048000,
703 .dc_tmpl_ofs
= 0x1f080000,
704 .vdi_ofs
= 0x1e068000,
708 static struct ipu_devtype ipu_type_imx53
= {
710 .cm_ofs
= 0x06000000,
711 .cpmem_ofs
= 0x07000000,
712 .srm_ofs
= 0x07040000,
713 .tpm_ofs
= 0x07060000,
714 .disp0_ofs
= 0x06040000,
715 .disp1_ofs
= 0x06048000,
716 .dc_tmpl_ofs
= 0x07080000,
717 .vdi_ofs
= 0x06068000,
721 static struct ipu_devtype ipu_type_imx6q
= {
723 .cm_ofs
= 0x00200000,
724 .cpmem_ofs
= 0x00300000,
725 .srm_ofs
= 0x00340000,
726 .tpm_ofs
= 0x00360000,
727 .disp0_ofs
= 0x00240000,
728 .disp1_ofs
= 0x00248000,
729 .dc_tmpl_ofs
= 0x00380000,
730 .vdi_ofs
= 0x00268000,
734 static const struct of_device_id imx_ipu_dt_ids
[] = {
735 { .compatible
= "fsl,imx51-ipu", .data
= &ipu_type_imx51
, },
736 { .compatible
= "fsl,imx53-ipu", .data
= &ipu_type_imx53
, },
737 { .compatible
= "fsl,imx6q-ipu", .data
= &ipu_type_imx6q
, },
740 MODULE_DEVICE_TABLE(of
, imx_ipu_dt_ids
);
742 static int ipu_submodules_init(struct ipu_soc
*ipu
,
743 struct platform_device
*pdev
, unsigned long ipu_base
,
748 struct device
*dev
= &pdev
->dev
;
749 const struct ipu_devtype
*devtype
= ipu
->devtype
;
751 ret
= ipu_di_init(ipu
, dev
, 0, ipu_base
+ devtype
->disp0_ofs
,
752 IPU_CONF_DI0_EN
, ipu_clk
);
758 ret
= ipu_di_init(ipu
, dev
, 1, ipu_base
+ devtype
->disp1_ofs
,
759 IPU_CONF_DI1_EN
, ipu_clk
);
765 ret
= ipu_dc_init(ipu
, dev
, ipu_base
+ devtype
->cm_ofs
+
766 IPU_CM_DC_REG_OFS
, ipu_base
+ devtype
->dc_tmpl_ofs
);
768 unit
= "dc_template";
772 ret
= ipu_dmfc_init(ipu
, dev
, ipu_base
+
773 devtype
->cm_ofs
+ IPU_CM_DMFC_REG_OFS
, ipu_clk
);
779 ret
= ipu_dp_init(ipu
, dev
, ipu_base
+ devtype
->srm_ofs
);
796 dev_err(&pdev
->dev
, "init %s failed with %d\n", unit
, ret
);
800 static void ipu_irq_handle(struct ipu_soc
*ipu
, const int *regs
, int num_regs
)
802 unsigned long status
;
805 for (i
= 0; i
< num_regs
; i
++) {
807 status
= ipu_cm_read(ipu
, IPU_INT_STAT(regs
[i
]));
808 status
&= ipu_cm_read(ipu
, IPU_INT_CTRL(regs
[i
]));
810 for_each_set_bit(bit
, &status
, 32) {
811 irq
= irq_linear_revmap(ipu
->domain
, regs
[i
] * 32 + bit
);
813 generic_handle_irq(irq
);
/*
 * Chained handler for the IPU "sync" interrupt line: demultiplexes the
 * sync/general interrupt status registers into per-bit virtual IRQs.
 */
static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	/* Status register indices carried by the sync interrupt line. */
	const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
	chained_irq_exit(chip, desc);
}
/*
 * Chained handler for the IPU "error" interrupt line: demultiplexes the
 * error interrupt status registers into per-bit virtual IRQs.
 */
static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	/* Status register indices carried by the error interrupt line. */
	const int int_reg[] = { 4, 5, 8, 9};
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
	chained_irq_exit(chip, desc);
}
844 int ipu_idmac_channel_irq(struct ipu_soc
*ipu
, struct ipuv3_channel
*channel
,
845 enum ipu_channel_irq irq_type
)
847 int irq
= irq_linear_revmap(ipu
->domain
, irq_type
+ channel
->num
);
850 irq
= irq_create_mapping(ipu
->domain
, irq_type
+ channel
->num
);
854 EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq
);
856 static void ipu_submodules_exit(struct ipu_soc
*ipu
)
/*
 * device_for_each_child() callback: unregister one child platform device.
 *
 * NOTE(review): the trailing "return 0;" was dropped by the corrupted
 * extraction and is restored — the callback must return int, and a
 * non-zero value would stop the iteration early.
 */
static int platform_remove_devices_fn(struct device *dev, void *unused)
{
	struct platform_device *pdev = to_platform_device(dev);

	platform_device_unregister(pdev);

	return 0;
}
874 static void platform_device_unregister_children(struct platform_device
*pdev
)
876 device_for_each_child(&pdev
->dev
, NULL
, platform_remove_devices_fn
);
879 struct ipu_platform_reg
{
880 struct ipu_client_platformdata pdata
;
884 static const struct ipu_platform_reg client_reg
[] = {
889 .dp
= IPU_DP_FLOW_SYNC_BG
,
890 .dma
[0] = IPUV3_CHANNEL_MEM_BG_SYNC
,
893 .name
= "imx-ipuv3-crtc",
899 .dma
[0] = IPUV3_CHANNEL_MEM_DC_SYNC
,
902 .name
= "imx-ipuv3-crtc",
906 static int ipu_client_id
;
908 static int ipu_add_subdevice_pdata(struct device
*dev
,
909 const struct ipu_platform_reg
*reg
)
911 struct platform_device
*pdev
;
913 pdev
= platform_device_register_data(dev
, reg
->name
, ipu_client_id
++,
914 ®
->pdata
, sizeof(struct ipu_platform_reg
));
916 return pdev
? 0 : -EINVAL
;
919 static int ipu_add_client_devices(struct ipu_soc
*ipu
)
924 for (i
= 0; i
< ARRAY_SIZE(client_reg
); i
++) {
925 const struct ipu_platform_reg
*reg
= &client_reg
[i
];
926 ret
= ipu_add_subdevice_pdata(ipu
->dev
, reg
);
934 platform_device_unregister_children(to_platform_device(ipu
->dev
));
940 static int ipu_irq_init(struct ipu_soc
*ipu
)
942 struct irq_chip_generic
*gc
;
943 struct irq_chip_type
*ct
;
946 ipu
->domain
= irq_domain_add_linear(ipu
->dev
->of_node
, IPU_NUM_IRQS
,
947 &irq_generic_chip_ops
, ipu
);
949 dev_err(ipu
->dev
, "failed to add irq domain\n");
953 ret
= irq_alloc_domain_generic_chips(ipu
->domain
, 32, 1, "IPU",
954 handle_level_irq
, 0, IRQF_VALID
, 0);
956 dev_err(ipu
->dev
, "failed to alloc generic irq chips\n");
957 irq_domain_remove(ipu
->domain
);
961 for (i
= 0; i
< IPU_NUM_IRQS
; i
+= 32) {
962 gc
= irq_get_domain_generic_chip(ipu
->domain
, i
);
963 gc
->reg_base
= ipu
->cm_reg
;
965 ct
->chip
.irq_ack
= irq_gc_ack_set_bit
;
966 ct
->chip
.irq_mask
= irq_gc_mask_clr_bit
;
967 ct
->chip
.irq_unmask
= irq_gc_mask_set_bit
;
968 ct
->regs
.ack
= IPU_INT_STAT(i
/ 32);
969 ct
->regs
.mask
= IPU_INT_CTRL(i
/ 32);
972 irq_set_chained_handler(ipu
->irq_sync
, ipu_irq_handler
);
973 irq_set_handler_data(ipu
->irq_sync
, ipu
);
974 irq_set_chained_handler(ipu
->irq_err
, ipu_err_irq_handler
);
975 irq_set_handler_data(ipu
->irq_err
, ipu
);
980 static void ipu_irq_exit(struct ipu_soc
*ipu
)
984 irq_set_chained_handler(ipu
->irq_err
, NULL
);
985 irq_set_handler_data(ipu
->irq_err
, NULL
);
986 irq_set_chained_handler(ipu
->irq_sync
, NULL
);
987 irq_set_handler_data(ipu
->irq_sync
, NULL
);
989 /* TODO: remove irq_domain_generic_chips */
991 for (i
= 0; i
< IPU_NUM_IRQS
; i
++) {
992 irq
= irq_linear_revmap(ipu
->domain
, i
);
994 irq_dispose_mapping(irq
);
997 irq_domain_remove(ipu
->domain
);
1000 static int ipu_probe(struct platform_device
*pdev
)
1002 const struct of_device_id
*of_id
=
1003 of_match_device(imx_ipu_dt_ids
, &pdev
->dev
);
1004 struct ipu_soc
*ipu
;
1005 struct resource
*res
;
1006 unsigned long ipu_base
;
1007 int i
, ret
, irq_sync
, irq_err
;
1008 const struct ipu_devtype
*devtype
;
1010 devtype
= of_id
->data
;
1012 irq_sync
= platform_get_irq(pdev
, 0);
1013 irq_err
= platform_get_irq(pdev
, 1);
1014 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1016 dev_dbg(&pdev
->dev
, "irq_sync: %d irq_err: %d\n",
1019 if (!res
|| irq_sync
< 0 || irq_err
< 0)
1022 ipu_base
= res
->start
;
1024 ipu
= devm_kzalloc(&pdev
->dev
, sizeof(*ipu
), GFP_KERNEL
);
1028 for (i
= 0; i
< 64; i
++)
1029 ipu
->channel
[i
].ipu
= ipu
;
1030 ipu
->devtype
= devtype
;
1031 ipu
->ipu_type
= devtype
->type
;
1033 spin_lock_init(&ipu
->lock
);
1034 mutex_init(&ipu
->channel_lock
);
1036 dev_dbg(&pdev
->dev
, "cm_reg: 0x%08lx\n",
1037 ipu_base
+ devtype
->cm_ofs
);
1038 dev_dbg(&pdev
->dev
, "idmac: 0x%08lx\n",
1039 ipu_base
+ devtype
->cm_ofs
+ IPU_CM_IDMAC_REG_OFS
);
1040 dev_dbg(&pdev
->dev
, "cpmem: 0x%08lx\n",
1041 ipu_base
+ devtype
->cpmem_ofs
);
1042 dev_dbg(&pdev
->dev
, "disp0: 0x%08lx\n",
1043 ipu_base
+ devtype
->disp0_ofs
);
1044 dev_dbg(&pdev
->dev
, "disp1: 0x%08lx\n",
1045 ipu_base
+ devtype
->disp1_ofs
);
1046 dev_dbg(&pdev
->dev
, "srm: 0x%08lx\n",
1047 ipu_base
+ devtype
->srm_ofs
);
1048 dev_dbg(&pdev
->dev
, "tpm: 0x%08lx\n",
1049 ipu_base
+ devtype
->tpm_ofs
);
1050 dev_dbg(&pdev
->dev
, "dc: 0x%08lx\n",
1051 ipu_base
+ devtype
->cm_ofs
+ IPU_CM_DC_REG_OFS
);
1052 dev_dbg(&pdev
->dev
, "ic: 0x%08lx\n",
1053 ipu_base
+ devtype
->cm_ofs
+ IPU_CM_IC_REG_OFS
);
1054 dev_dbg(&pdev
->dev
, "dmfc: 0x%08lx\n",
1055 ipu_base
+ devtype
->cm_ofs
+ IPU_CM_DMFC_REG_OFS
);
1056 dev_dbg(&pdev
->dev
, "vdi: 0x%08lx\n",
1057 ipu_base
+ devtype
->vdi_ofs
);
1059 ipu
->cm_reg
= devm_ioremap(&pdev
->dev
,
1060 ipu_base
+ devtype
->cm_ofs
, PAGE_SIZE
);
1061 ipu
->idmac_reg
= devm_ioremap(&pdev
->dev
,
1062 ipu_base
+ devtype
->cm_ofs
+ IPU_CM_IDMAC_REG_OFS
,
1064 ipu
->cpmem_base
= devm_ioremap(&pdev
->dev
,
1065 ipu_base
+ devtype
->cpmem_ofs
, PAGE_SIZE
);
1067 if (!ipu
->cm_reg
|| !ipu
->idmac_reg
|| !ipu
->cpmem_base
) {
1069 goto failed_ioremap
;
1072 ipu
->clk
= devm_clk_get(&pdev
->dev
, "bus");
1073 if (IS_ERR(ipu
->clk
)) {
1074 ret
= PTR_ERR(ipu
->clk
);
1075 dev_err(&pdev
->dev
, "clk_get failed with %d", ret
);
1076 goto failed_clk_get
;
1079 platform_set_drvdata(pdev
, ipu
);
1081 clk_prepare_enable(ipu
->clk
);
1083 ipu
->dev
= &pdev
->dev
;
1084 ipu
->irq_sync
= irq_sync
;
1085 ipu
->irq_err
= irq_err
;
1087 ret
= ipu_irq_init(ipu
);
1089 goto out_failed_irq
;
1091 ret
= device_reset(&pdev
->dev
);
1093 dev_err(&pdev
->dev
, "failed to reset: %d\n", ret
);
1094 goto out_failed_reset
;
1096 ret
= ipu_memory_reset(ipu
);
1098 goto out_failed_reset
;
1100 /* Set MCU_T to divide MCU access window into 2 */
1101 ipu_cm_write(ipu
, 0x00400000L
| (IPU_MCU_T_DEFAULT
<< 18),
1104 ret
= ipu_submodules_init(ipu
, pdev
, ipu_base
, ipu
->clk
);
1106 goto failed_submodules_init
;
1108 ret
= ipu_add_client_devices(ipu
);
1110 dev_err(&pdev
->dev
, "adding client devices failed with %d\n",
1112 goto failed_add_clients
;
1115 dev_info(&pdev
->dev
, "%s probed\n", devtype
->name
);
1120 ipu_submodules_exit(ipu
);
1121 failed_submodules_init
:
1125 clk_disable_unprepare(ipu
->clk
);
1131 static int ipu_remove(struct platform_device
*pdev
)
1133 struct ipu_soc
*ipu
= platform_get_drvdata(pdev
);
1135 platform_device_unregister_children(pdev
);
1136 ipu_submodules_exit(ipu
);
1139 clk_disable_unprepare(ipu
->clk
);
1144 static struct platform_driver imx_ipu_driver
= {
1146 .name
= "imx-ipuv3",
1147 .of_match_table
= imx_ipu_dt_ids
,
1150 .remove
= ipu_remove
,
1153 module_platform_driver(imx_ipu_driver
);
1155 MODULE_DESCRIPTION("i.MX IPU v3 driver");
1156 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
1157 MODULE_LICENSE("GPL");