/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>

#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
#define DMA_DCR		0x00		/* Control Register */
#define DMA_DISR	0x04		/* Interrupt status Register */
#define DMA_DIMR	0x08		/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR	0x10		/* Request timeout Register */
#define DMA_DSESR	0x14		/* Transfer Error Status Register */
#define DMA_DBOSR	0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c		/* Burst timeout control Register */
#define DMA_WSRA	0x40		/* W-Size Register A */
#define DMA_XSRA	0x44		/* X-Size Register A */
#define DMA_YSRA	0x48		/* Y-Size Register A */
#define DMA_WSRB	0x4c		/* W-Size Register B */
#define DMA_XSRB	0x50		/* X-Size Register B */
#define DMA_YSRB	0x54		/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */
#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)

#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)
enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};
struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};
struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};
struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
};
static struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx21_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX21_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active,
					struct imxdma_desc, node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	unsigned long now;

	now = min(d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}
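
/*
 * imxdma_enable_hw - kick off the transfer programmed into the channel:
 * ack and unmask the channel interrupt, then set CCR_CEN (plus CCR_ACRPT
 * so a repeat can be armed while the channel runs).  On parts with
 * hardware chaining, the next sg chunk is pre-programmed here and CCR_RPT
 * makes the controller start it automatically when the current one ends.
 */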
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}
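
/*
 * Watchdog for hardware-chained transfers: if a chained chunk does not
 * complete within the timeout armed in dma_irq_handle_channel(), stop the
 * channel and let the tasklet report and clean up the descriptor.
 */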
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock(&imxdma->lock);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock(&imxdma->lock);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
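
/*
 * On i.MX21/27 the error conditions have no dedicated IRQ line, so the
 * error handler is invoked from the normal interrupt path before the
 * per-channel completions are processed; on i.MX1 it has its own IRQ.
 */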
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}
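
/*
 * Program the channel registers for one descriptor.  INTERLEAVED transfers
 * must first claim one of the two global 2D slots (A or B); a slot can be
 * reused when the requested x/y/w geometry matches what is already set.
 * The CCR value is built from the IMX_DMA_MEMSIZE_*/IMX_DMA_TYPE_* fields:
 * the source configuration sits in the SSIZ/SMOD bits and the destination
 * configuration is the same bit pattern shifted left by two (DSIZ/DMOD).
 */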
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		spin_lock_irqsave(&imxdma->lock, flags);
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0) {
			spin_unlock_irqrestore(&imxdma->lock, flags);
			return -EBUSY;
		}

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;
		spin_unlock_irqrestore(&imxdma->lock, flags);

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%d\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
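
/*
 * Completion tasklet: run the client callback, then retire the descriptor.
 * Cyclic descriptors stay on ld_active and are never marked complete, so
 * the callback keeps firing once per period until the channel is
 * terminated.
 */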
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and dont mark the descriptor as complete.
	 * Only in non-cyclic cases it would be marked as complete
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdma->lock);
}
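
/*
 * DMA_SLAVE_CONFIG derives the per-channel CCR templates: ccr_from_device
 * uses a FIFO source at the peripheral's bus width with a 32-bit linear
 * memory destination, ccr_to_device the mirror image, both with CCR_REN
 * set so the transfer is paced by the peripheral's DMA request line.
 */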
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdma->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->hw_chaining = 1;
		if (!imxdma_hw_chain(imxdmac))
			return -EINVAL;
		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdma, imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
				 imxdmac->word_size, DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		__memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
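
/*
 * Only single-frame memory-to-memory templates are accepted: x is the line
 * length in bytes, y the number of lines (numf), and w the line stride
 * (icg + x).  These end up in the XSR/YSR/WSR registers of the 2D slot
 * claimed in imxdma_xfer_desc().
 */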
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
		"   src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
		imxdmac->channel, xt->src_start, xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}
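
/*
 * Illustrative only (not part of the original driver): a dmaengine client
 * would typically drive a slave channel roughly like this.  Error handling
 * is omitted and the names fifo_phys_addr/sgl/sg_len/done_fn are
 * hypothetical placeholders for the client's own resources:
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst   = 16,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				      DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	txd->callback = done_fn;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */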
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	int ret, i;
	int irq, irq_err;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!imxdma->base)
		return -EADDRNOTAVAIL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	clk_prepare_enable(imxdma->dma_ipg);
	clk_prepare_enable(imxdma->dma_ahb);

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto err;
		}

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto err;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto err;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto err;
			}
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err;
	}

	return 0;

err:
	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
	return ret;
}
static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	dma_async_device_unregister(&imxdma->dma_device);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}
static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.id_table	= imx_dma_devtype,
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");