/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS	16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */
#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)
enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};
struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};
struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
	unsigned int			irq;
};
enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
	unsigned int			irq;
	unsigned int			irq_err;
};

struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			 request;
};
static const struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma",
		.data = &imx_dma_devtype[IMX1_DMA],
	}, {
		.compatible = "fsl,imx21-dma",
		.data = &imx_dma_devtype[IMX21_DMA],
	}, {
		.compatible = "fsl,imx27-dma",
		.data = &imx_dma_devtype[IMX27_DMA],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);
static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active,
					struct imxdma_desc, node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}
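
/*
 * Hardware descriptor chaining (automatically reloading the next sg
 * entry while the current one is still in flight) is only implemented
 * by the i.MX27 version of the engine; on i.MX1/i.MX21 this always
 * returns 0 and the next chunk is programmed from the interrupt path
 * instead.
 */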
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}
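
/*
 * Kick a channel: ack and unmask its interrupt, then set CCR_CEN (with
 * CCR_ACRPT so a repeat setup may be written while the channel runs).
 * When i.MX27 hardware chaining is usable and the descriptor has a
 * further sg entry, the next chunk is pre-programmed and CCR_RPT makes
 * the engine reload it without software intervention.  Runs with IRQs
 * disabled because the CCR is also written from interrupt context.
 */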
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;

			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc, node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;

		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}
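
/*
 * Program the hardware for a descriptor.  Interleaved transfers first
 * claim one of the two global 2D slots (A or B, shared by all sixteen
 * channels); if both slots carry a different geometry the descriptor
 * stays queued and -EBUSY is returned so it can be retried later.
 */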
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			    ((imxdma->slots_2d[i].xsr != d->x) ||
			     (imxdma->slots_2d[i].ysr != d->y) ||
			     (imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;

	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
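
/*
 * Completion bottom half.  Cyclic descriptors stay on ld_active and
 * only fire the client callback; anything else gets its cookie
 * completed, releases its 2D slot if it held one, moves back to
 * ld_free, and the next queued descriptor (if any) is started.  The
 * callback itself runs after the engine lock is dropped.
 */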
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in non-cyclic cases it would be marked as complete
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);
}
static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return 0;
}
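
/*
 * Translate a dma_slave_config into the two ready-made CCR values (one
 * per direction): the FIFO side uses the peripheral's register width,
 * the memory side always runs 32-bit linear, and CCR_REN ties the
 * channel to its request line.  The burst length register is set to
 * maxburst * word size, in bytes.
 *
 * A typical client call might look like this (illustrative values
 * only, not taken from this file; fifo_phys_addr is hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst   = 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */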
static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
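
/*
 * tx_submit only moves the prepared descriptor from ld_free to
 * ld_queue under the engine lock and assigns its cookie; nothing is
 * written to the hardware until issue_pending (or the completion
 * tasklet) picks it up.
 */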
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM)
		desc->src = imxdmac->per_address;
	else
		desc->dest = imxdmac->per_address;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
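
/*
 * The engine has no native cyclic mode, so a cyclic transfer is faked
 * with a software scatterlist of 'periods' equal chunks whose final
 * entry chains back to the first one; together with the never-ending
 * length marker IMX_DMA_LENGTH_LOOP, the sg-emulation code then walks
 * the ring until the channel is terminated.
 */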
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM)
		desc->src = imxdmac->per_address;
	else
		desc->dest = imxdmac->per_address;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
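
/*
 * Only the 2D geometry the hardware understands is accepted here: a
 * single frame (frame_size == 1) of numf rows, mem-to-mem.  Each row
 * transfers sgl[0].size bytes (X), rows are spaced sgl[0].icg bytes
 * apart so the row pitch W is icg + size, and Y is the row count.
 */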
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		"   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long)xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}
static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}
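
/*
 * OF translation: the controller uses a single specifier cell carrying
 * the peripheral's DMA request number.  A client node would reference
 * it like this (illustrative request number):
 *
 *	dmas = <&dma 8>;
 *	dma-names = "rx";
 */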
static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
				   imxdma_filter_fn, &fdata);
}
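
/*
 * Probe order: map registers, grab both clocks, reset the block, wire
 * up the interrupt(s) (i.MX1 has one shared DMA IRQ plus an error IRQ;
 * i.MX21/27 have one IRQ per channel), initialise the sixteen channels
 * and the two 2D slots, then register with the dmaengine core and,
 * when booted from a device tree, with the OF DMA helpers.
 */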
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	const struct of_device_id *of_id;
	int ret, i;
	int irq, irq_err;

	of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					       dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
						 imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}
static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}
static int imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}
static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.id_table	= imx_dma_devtype,
	.remove		= imxdma_remove,
};
static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");