/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>

#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
#define DMA_DCR		0x00		/* Control Register */
#define DMA_DISR	0x04		/* Interrupt status Register */
#define DMA_DIMR	0x08		/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR	0x10		/* Request timeout Register */
#define DMA_DSESR	0x14		/* Transfer Error Status Register */
#define DMA_DBOSR	0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c		/* Burst timeout control Register */
#define DMA_WSRA	0x40		/* W-Size Register A */
#define DMA_XSRA	0x44		/* X-Size Register A */
#define DMA_YSRA	0x48		/* Y-Size Register A */
#define DMA_WSRB	0x4c		/* W-Size Register B */
#define DMA_XSRB	0x50		/* X-Size Register B */
#define DMA_YSRB	0x54		/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */
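
/*
 * Each DMA channel owns a 64-byte window of per-channel registers, hence
 * the ((x) << 6) stride in the macros above; e.g. channel 2's control
 * register sits at DMA_CCR(2) = 0x8c + (2 << 6) = 0x10c. DMA_RTOR and
 * DMA_BUCR deliberately share offset 0x98; per the i.MX reference manual
 * the register actually present there depends on whether the channel
 * runs with its request line enabled (CCR_REN).
 */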
#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO (0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO (0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)
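
/*
 * CCR field layout, as implied by the shift values above: ACRPT at
 * bit 14; DMOD (bits 13:12) and SMOD (bits 11:10) select the linear,
 * 2D, FIFO or end-of-burst FIFO destination/source modes; MDIR (bit 9)
 * decrements addresses; MSEL (bit 8) picks 2D register set B; DSIZ
 * (bits 7:6) and SSIZ (bits 5:4) set the 32/8/16-bit port sizes; REN
 * (request enable), RPT (repeat), FRC (force cycle) and CEN (channel
 * enable) occupy bits 3..0.
 */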
enum  imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};
/*
 * struct imxdma_channel_internal - i.MX specific DMA extension
 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
 * @resbytes: total residual number of bytes to transfer
 *            (it can be lower or same as sum of SG mapped chunk sizes)
 * @in_use: channel is currently running a transfer
 * @watchdog: timer used to detect stalled hardware-chained transfers
 * @hw_chaining: nonzero if the hardware can chain sg chunks by itself
 *
 * The structure is used for i.MX DMA processing. It would probably be
 * good to use a generic struct for external interfacing in the future
 * and keep struct imxdma_channel_internal only as an extension to it.
 */
struct imxdma_channel_internal {
	struct scatterlist		*sg;
	unsigned int			resbytes;

	int				in_use;

	struct timer_list		watchdog;

	int				hw_chaining;
};
struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};
struct imxdma_channel {
	struct imxdma_channel_internal	internal;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	spinlock_t			lock;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
};
struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
};
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
/* TODO: put this inside any struct */
static void __iomem *imx_dmav1_baseaddr;
static struct clk *dma_clk;

static void imx_dmav1_writel(unsigned val, unsigned offset)
{
	__raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
	return __raw_readl(imx_dmav1_baseaddr + offset);
}
static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma)
{
	if (cpu_is_mx27())
		return imxdma->hw_chaining;
	else
		return 0;
}
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	unsigned long now;

	now = min(imxdma->resbytes, sg->length);
	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
		imxdma->resbytes -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		 "size 0x%08x\n", imxdmac->channel,
		 imx_dmav1_readl(DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_CNTR(imxdmac->channel)));

	return now;
}
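
/*
 * Note that the DMA controller itself has no scatter-gather capability:
 * imxdma_sg_next() emulates it by reloading SAR/DAR and CNTR with the
 * next chunk, either from the completion interrupt path
 * (dma_irq_handle_channel()) or, on parts with hardware chaining, while
 * the previous chunk is still in flight (see imxdma_enable_hw()).
 */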
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	if (imxdmac->internal.in_use)
		return;

	local_irq_save(flags);

	imx_dmav1_writel(1 << channel, DMA_DISR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
		CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			imxdmac->internal.sg && imxdma_hw_chain(&imxdmac->internal)) {
		imxdmac->internal.sg = sg_next(imxdmac->internal.sg);
		if (imxdmac->internal.sg) {
			u32 tmp;
			imxdma_sg_next(d, imxdmac->internal.sg);
			tmp = imx_dmav1_readl(DMA_CCR(channel));
			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
				DMA_CCR(channel));
		}
	}
	imxdmac->internal.in_use = 1;

	local_irq_restore(flags);
}
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imxdma_hw_chain(&imxdmac->internal))
		del_timer(&imxdmac->internal.watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
		DMA_CCR(channel));
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdmac->internal.in_use = 0;
	local_irq_restore(flags);
}
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	int channel = imxdmac->channel;

	imx_dmav1_writel(0, DMA_CCR(channel));
	imxdmac->internal.in_use = 0;
	imxdmac->internal.sg = NULL;

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
}
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(DMA_DISR);

	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
		   imx_dmav1_readl(DMA_DRTOSR) |
		   imx_dmav1_readl(DMA_DSESR)  |
		   imx_dmav1_readl(DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		internal = &imxdma->channel[i].internal;
		errcode = 0;

		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	if (imxdma->sg) {
		u32 tmp;
		imxdma->sg = sg_next(imxdma->sg);

		if (imxdma->sg) {
			spin_lock(&imxdmac->lock);
			if (list_empty(&imxdmac->ld_active)) {
				spin_unlock(&imxdmac->lock);
				goto out;
			}
			desc = list_first_entry(&imxdmac->ld_active,
						struct imxdma_desc,
						node);
			spin_unlock(&imxdmac->lock);

			imxdma_sg_next(desc, imxdma->sg);

			tmp = imx_dmav1_readl(DMA_CCR(chno));

			if (imxdma_hw_chain(imxdma)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdma->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdma)) {
			del_timer(&imxdma->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
		 disr);

	imx_dmav1_writel(disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i)) {
			internal = &imxdma->channel[i].internal;
			dma_irq_handle_channel(&imxdma->channel[i]);
		}
	}

	return IRQ_HANDLED;
}
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		imxdmac->internal.sg = NULL;

		imx_dmav1_writel(d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(d->config_mem | (d->config_port << 2),
			 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%zu\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);
		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		imxdmac->internal.sg = d->sg;
		imxdmac->internal.resbytes = d->len;

		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%zu dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%zu dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d, d->sg);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	dma_cookie_complete(&desc->desc);

	/* If we are dealing with a cyclic descriptor keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->internal.hw_chaining = 1;
		if (!imxdma_hw_chain(&imxdmac->internal))
			return -EINVAL;
		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size,
				 DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}
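
/*
 * Illustrative sketch (not part of the original driver): how a client of
 * this era hands the configuration consumed by DMA_SLAVE_CONFIG above to
 * the channel. The FIFO address passed in is a hypothetical placeholder;
 * dmaengine_slave_config() routes the struct into imxdma_control().
 */
#if 0
static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.src_addr = fifo_addr;	/* peripheral FIFO, hypothetical */
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	cfg.src_maxburst = 8;		/* becomes the watermark level */

	return dmaengine_slave_config(chan, &cfg);
}
#endif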
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}
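
/*
 * tx_submit() only assigns the dmaengine cookie under the channel lock;
 * no hardware register is touched here. The transfer is actually started
 * later, when imxdma_issue_pending() (or the completion tasklet, for
 * queued descriptors) hands the descriptor to imxdma_xfer_desc().
 */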
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
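
/*
 * Illustrative sketch (not part of the original driver): the sequence a
 * client would follow to run a slave scatter-gather transfer through the
 * prep routine above. Channel request, sg table setup and error handling
 * are elided; my_callback is a hypothetical completion handler.
 */
#if 0
static void my_callback(void *param);	/* hypothetical */

static int example_run_slave_sg(struct dma_chan *chan,
				struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT, NULL);
	if (!txd)
		return -ENOMEM;

	txd->callback = my_callback;
	txd->callback_param = NULL;

	dmaengine_submit(txd);		/* ends up in imxdma_tx_submit() */
	dma_async_issue_pending(chan);	/* kicks imxdma_issue_pending() */
	return 0;
}
#endif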
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
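
/*
 * The terminating entry built above is what makes the transfer cyclic:
 * setting bit 0 of page_link marks the extra entry as a chain link back
 * to the start of sg_list, while clearing bit 1 removes the end-of-list
 * mark, so sg_next() walks the periods forever and the channel is
 * reprogrammed with the first period again after the last one completes.
 */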
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%zu\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	if (cpu_is_mx1())
		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	else if (cpu_is_mx21())
		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	else if (cpu_is_mx27())
		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	else
		return 0;

	dma_clk = clk_get(NULL, "dma");
	if (IS_ERR(dma_clk))
		return PTR_ERR(dma_clk);
	clk_enable(dma_clk);

	/* allocate the engine before the handlers that take it as dev_id */
	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	/* reset DMA module */
	imx_dmav1_writel(DCR_DRST, DMA_DCR);

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register IRQ for DMA\n");
			kfree(imxdma);
			return ret;
		}

		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, NULL);
			kfree(imxdma);
			return ret;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		memset(&imxdmac->internal, 0, sizeof(imxdmac->internal));
		if (cpu_is_mx21() || cpu_is_mx27()) {
			ret = request_irq(MX2x_INT_DMACH0 + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				pr_crit("Can't register IRQ %d for DMA channel %d\n",
						MX2x_INT_DMACH0 + i, i);
				goto err_init;
			}
			init_timer(&imxdmac->internal.watchdog);
			imxdmac->internal.watchdog.function = &imxdma_watchdog;
			imxdmac->internal.watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:

	if (cpu_is_mx21() || cpu_is_mx27()) {
		while (--i >= 0)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);
	return ret;
}
static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		for (i = 0; i < IMX_DMA_CHANNELS; i++)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);

	return 0;
}
static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");