/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/types.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/device.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/slab.h>
25 #include <linux/platform_device.h>
26 #include <linux/dmaengine.h>
27 #include <linux/module.h>
30 #include <mach/dma-v1.h>
31 #include <mach/hardware.h>
33 #include "dmaengine.h"
35 struct imxdma_channel
{
36 struct imxdma_engine
*imxdma
;
38 unsigned int imxdma_channel
;
40 enum dma_slave_buswidth word_size
;
41 dma_addr_t per_address
;
45 struct dma_async_tx_descriptor desc
;
46 enum dma_status status
;
48 struct scatterlist
*sg_list
;
51 #define MAX_DMA_CHANNELS 8
53 struct imxdma_engine
{
55 struct device_dma_parameters dma_parms
;
56 struct dma_device dma_device
;
57 struct imxdma_channel channel
[MAX_DMA_CHANNELS
];
60 static struct imxdma_channel
*to_imxdma_chan(struct dma_chan
*chan
)
62 return container_of(chan
, struct imxdma_channel
, chan
);
65 static void imxdma_handle(struct imxdma_channel
*imxdmac
)
67 if (imxdmac
->desc
.callback
)
68 imxdmac
->desc
.callback(imxdmac
->desc
.callback_param
);
69 dma_cookie_complete(&imxdmac
->desc
);
72 static void imxdma_irq_handler(int channel
, void *data
)
74 struct imxdma_channel
*imxdmac
= data
;
76 imxdmac
->status
= DMA_SUCCESS
;
77 imxdma_handle(imxdmac
);
80 static void imxdma_err_handler(int channel
, void *data
, int error
)
82 struct imxdma_channel
*imxdmac
= data
;
84 imxdmac
->status
= DMA_ERROR
;
85 imxdma_handle(imxdmac
);
88 static void imxdma_progression(int channel
, void *data
,
89 struct scatterlist
*sg
)
91 struct imxdma_channel
*imxdmac
= data
;
93 imxdmac
->status
= DMA_SUCCESS
;
94 imxdma_handle(imxdmac
);
97 static int imxdma_control(struct dma_chan
*chan
, enum dma_ctrl_cmd cmd
,
100 struct imxdma_channel
*imxdmac
= to_imxdma_chan(chan
);
101 struct dma_slave_config
*dmaengine_cfg
= (void *)arg
;
103 unsigned int mode
= 0;
106 case DMA_TERMINATE_ALL
:
107 imxdmac
->status
= DMA_ERROR
;
108 imx_dma_disable(imxdmac
->imxdma_channel
);
110 case DMA_SLAVE_CONFIG
:
111 if (dmaengine_cfg
->direction
== DMA_DEV_TO_MEM
) {
112 imxdmac
->per_address
= dmaengine_cfg
->src_addr
;
113 imxdmac
->watermark_level
= dmaengine_cfg
->src_maxburst
;
114 imxdmac
->word_size
= dmaengine_cfg
->src_addr_width
;
116 imxdmac
->per_address
= dmaengine_cfg
->dst_addr
;
117 imxdmac
->watermark_level
= dmaengine_cfg
->dst_maxburst
;
118 imxdmac
->word_size
= dmaengine_cfg
->dst_addr_width
;
121 switch (imxdmac
->word_size
) {
122 case DMA_SLAVE_BUSWIDTH_1_BYTE
:
123 mode
= IMX_DMA_MEMSIZE_8
;
125 case DMA_SLAVE_BUSWIDTH_2_BYTES
:
126 mode
= IMX_DMA_MEMSIZE_16
;
129 case DMA_SLAVE_BUSWIDTH_4_BYTES
:
130 mode
= IMX_DMA_MEMSIZE_32
;
133 ret
= imx_dma_config_channel(imxdmac
->imxdma_channel
,
134 mode
| IMX_DMA_TYPE_FIFO
,
135 IMX_DMA_MEMSIZE_32
| IMX_DMA_TYPE_LINEAR
,
136 imxdmac
->dma_request
, 1);
141 imx_dma_config_burstlen(imxdmac
->imxdma_channel
,
142 imxdmac
->watermark_level
* imxdmac
->word_size
);
152 static enum dma_status
imxdma_tx_status(struct dma_chan
*chan
,
154 struct dma_tx_state
*txstate
)
156 return dma_cookie_status(chan
, cookie
, txstate
);
159 static dma_cookie_t
imxdma_tx_submit(struct dma_async_tx_descriptor
*tx
)
161 struct imxdma_channel
*imxdmac
= to_imxdma_chan(tx
->chan
);
164 spin_lock_irq(&imxdmac
->lock
);
166 cookie
= dma_cookie_assign(tx
);
168 spin_unlock_irq(&imxdmac
->lock
);
173 static int imxdma_alloc_chan_resources(struct dma_chan
*chan
)
175 struct imxdma_channel
*imxdmac
= to_imxdma_chan(chan
);
176 struct imx_dma_data
*data
= chan
->private;
178 imxdmac
->dma_request
= data
->dma_request
;
180 dma_async_tx_descriptor_init(&imxdmac
->desc
, chan
);
181 imxdmac
->desc
.tx_submit
= imxdma_tx_submit
;
182 /* txd.flags will be overwritten in prep funcs */
183 imxdmac
->desc
.flags
= DMA_CTRL_ACK
;
185 imxdmac
->status
= DMA_SUCCESS
;
190 static void imxdma_free_chan_resources(struct dma_chan
*chan
)
192 struct imxdma_channel
*imxdmac
= to_imxdma_chan(chan
);
194 imx_dma_disable(imxdmac
->imxdma_channel
);
196 if (imxdmac
->sg_list
) {
197 kfree(imxdmac
->sg_list
);
198 imxdmac
->sg_list
= NULL
;
202 static struct dma_async_tx_descriptor
*imxdma_prep_slave_sg(
203 struct dma_chan
*chan
, struct scatterlist
*sgl
,
204 unsigned int sg_len
, enum dma_transfer_direction direction
,
207 struct imxdma_channel
*imxdmac
= to_imxdma_chan(chan
);
208 struct scatterlist
*sg
;
209 int i
, ret
, dma_length
= 0;
210 unsigned int dmamode
;
212 if (imxdmac
->status
== DMA_IN_PROGRESS
)
215 imxdmac
->status
= DMA_IN_PROGRESS
;
217 for_each_sg(sgl
, sg
, sg_len
, i
) {
218 dma_length
+= sg
->length
;
221 if (direction
== DMA_DEV_TO_MEM
)
222 dmamode
= DMA_MODE_READ
;
224 dmamode
= DMA_MODE_WRITE
;
226 switch (imxdmac
->word_size
) {
227 case DMA_SLAVE_BUSWIDTH_4_BYTES
:
228 if (sgl
->length
& 3 || sgl
->dma_address
& 3)
231 case DMA_SLAVE_BUSWIDTH_2_BYTES
:
232 if (sgl
->length
& 1 || sgl
->dma_address
& 1)
235 case DMA_SLAVE_BUSWIDTH_1_BYTE
:
241 ret
= imx_dma_setup_sg(imxdmac
->imxdma_channel
, sgl
, sg_len
,
242 dma_length
, imxdmac
->per_address
, dmamode
);
246 return &imxdmac
->desc
;
249 static struct dma_async_tx_descriptor
*imxdma_prep_dma_cyclic(
250 struct dma_chan
*chan
, dma_addr_t dma_addr
, size_t buf_len
,
251 size_t period_len
, enum dma_transfer_direction direction
)
253 struct imxdma_channel
*imxdmac
= to_imxdma_chan(chan
);
254 struct imxdma_engine
*imxdma
= imxdmac
->imxdma
;
256 unsigned int periods
= buf_len
/ period_len
;
257 unsigned int dmamode
;
259 dev_dbg(imxdma
->dev
, "%s channel: %d buf_len=%d period_len=%d\n",
260 __func__
, imxdmac
->channel
, buf_len
, period_len
);
262 if (imxdmac
->status
== DMA_IN_PROGRESS
)
264 imxdmac
->status
= DMA_IN_PROGRESS
;
266 ret
= imx_dma_setup_progression_handler(imxdmac
->imxdma_channel
,
269 dev_err(imxdma
->dev
, "Failed to setup the DMA handler\n");
273 if (imxdmac
->sg_list
)
274 kfree(imxdmac
->sg_list
);
276 imxdmac
->sg_list
= kcalloc(periods
+ 1,
277 sizeof(struct scatterlist
), GFP_KERNEL
);
278 if (!imxdmac
->sg_list
)
281 sg_init_table(imxdmac
->sg_list
, periods
);
283 for (i
= 0; i
< periods
; i
++) {
284 imxdmac
->sg_list
[i
].page_link
= 0;
285 imxdmac
->sg_list
[i
].offset
= 0;
286 imxdmac
->sg_list
[i
].dma_address
= dma_addr
;
287 imxdmac
->sg_list
[i
].length
= period_len
;
288 dma_addr
+= period_len
;
292 imxdmac
->sg_list
[periods
].offset
= 0;
293 imxdmac
->sg_list
[periods
].length
= 0;
294 imxdmac
->sg_list
[periods
].page_link
=
295 ((unsigned long)imxdmac
->sg_list
| 0x01) & ~0x02;
297 if (direction
== DMA_DEV_TO_MEM
)
298 dmamode
= DMA_MODE_READ
;
300 dmamode
= DMA_MODE_WRITE
;
302 ret
= imx_dma_setup_sg(imxdmac
->imxdma_channel
, imxdmac
->sg_list
, periods
,
303 IMX_DMA_LENGTH_LOOP
, imxdmac
->per_address
, dmamode
);
307 return &imxdmac
->desc
;
310 static void imxdma_issue_pending(struct dma_chan
*chan
)
312 struct imxdma_channel
*imxdmac
= to_imxdma_chan(chan
);
314 if (imxdmac
->status
== DMA_IN_PROGRESS
)
315 imx_dma_enable(imxdmac
->imxdma_channel
);
318 static int __init
imxdma_probe(struct platform_device
*pdev
)
320 struct imxdma_engine
*imxdma
;
323 imxdma
= kzalloc(sizeof(*imxdma
), GFP_KERNEL
);
327 INIT_LIST_HEAD(&imxdma
->dma_device
.channels
);
329 dma_cap_set(DMA_SLAVE
, imxdma
->dma_device
.cap_mask
);
330 dma_cap_set(DMA_CYCLIC
, imxdma
->dma_device
.cap_mask
);
332 /* Initialize channel parameters */
333 for (i
= 0; i
< MAX_DMA_CHANNELS
; i
++) {
334 struct imxdma_channel
*imxdmac
= &imxdma
->channel
[i
];
336 imxdmac
->imxdma_channel
= imx_dma_request_by_prio("dmaengine",
338 if ((int)imxdmac
->channel
< 0) {
343 imx_dma_setup_handlers(imxdmac
->imxdma_channel
,
344 imxdma_irq_handler
, imxdma_err_handler
, imxdmac
);
346 imxdmac
->imxdma
= imxdma
;
347 spin_lock_init(&imxdmac
->lock
);
349 imxdmac
->chan
.device
= &imxdma
->dma_device
;
350 imxdmac
->channel
= i
;
352 /* Add the channel to the DMAC list */
353 list_add_tail(&imxdmac
->chan
.device_node
, &imxdma
->dma_device
.channels
);
356 imxdma
->dev
= &pdev
->dev
;
357 imxdma
->dma_device
.dev
= &pdev
->dev
;
359 imxdma
->dma_device
.device_alloc_chan_resources
= imxdma_alloc_chan_resources
;
360 imxdma
->dma_device
.device_free_chan_resources
= imxdma_free_chan_resources
;
361 imxdma
->dma_device
.device_tx_status
= imxdma_tx_status
;
362 imxdma
->dma_device
.device_prep_slave_sg
= imxdma_prep_slave_sg
;
363 imxdma
->dma_device
.device_prep_dma_cyclic
= imxdma_prep_dma_cyclic
;
364 imxdma
->dma_device
.device_control
= imxdma_control
;
365 imxdma
->dma_device
.device_issue_pending
= imxdma_issue_pending
;
367 platform_set_drvdata(pdev
, imxdma
);
369 imxdma
->dma_device
.dev
->dma_parms
= &imxdma
->dma_parms
;
370 dma_set_max_seg_size(imxdma
->dma_device
.dev
, 0xffffff);
372 ret
= dma_async_device_register(&imxdma
->dma_device
);
374 dev_err(&pdev
->dev
, "unable to register\n");
382 struct imxdma_channel
*imxdmac
= &imxdma
->channel
[i
];
383 imx_dma_free(imxdmac
->imxdma_channel
);
390 static int __exit
imxdma_remove(struct platform_device
*pdev
)
392 struct imxdma_engine
*imxdma
= platform_get_drvdata(pdev
);
395 dma_async_device_unregister(&imxdma
->dma_device
);
397 for (i
= 0; i
< MAX_DMA_CHANNELS
; i
++) {
398 struct imxdma_channel
*imxdmac
= &imxdma
->channel
[i
];
400 imx_dma_free(imxdmac
->imxdma_channel
);
408 static struct platform_driver imxdma_driver
= {
412 .remove
= __exit_p(imxdma_remove
),
415 static int __init
imxdma_module_init(void)
417 return platform_driver_probe(&imxdma_driver
, imxdma_probe
);
419 subsys_initcall(imxdma_module_init
);
421 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
422 MODULE_DESCRIPTION("i.MX dma driver");
423 MODULE_LICENSE("GPL");