/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)

#define DMA_DCR		0x00	/* Control Register */
#define DMA_DISR	0x04	/* Interrupt status Register */
#define DMA_DIMR	0x08	/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c	/* Burst timeout status Register */
#define DMA_DRTOSR	0x10	/* Request timeout Register */
#define DMA_DSESR	0x14	/* Transfer Error Status Register */
#define DMA_DBOSR	0x18	/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c	/* Burst timeout control Register */
#define DMA_WSRA	0x40	/* W-Size Register A */
#define DMA_XSRA	0x44	/* X-Size Register A */
#define DMA_YSRA	0x48	/* Y-Size Register A */
#define DMA_WSRB	0x4c	/* W-Size Register B */
#define DMA_XSRB	0x50	/* X-Size Register B */
#define DMA_YSRB	0x54	/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST		(1<<1)
#define DCR_DEN			(1<<0)
#define DBTOCR_EN		(1<<15)
#define DBTOCR_CNT(x)		((x) & 0x7fff)
#define CNTR_CNT(x)		((x) & 0xffffff)
#define CCR_ACRPT		(1<<14)
#define CCR_DMOD_LINEAR		(0x0 << 12)
#define CCR_DMOD_2D		(0x1 << 12)
#define CCR_DMOD_FIFO		(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR		(0x0 << 10)
#define CCR_SMOD_2D		(0x1 << 10)
#define CCR_SMOD_FIFO		(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC		(1<<9)
#define CCR_MSEL_B		(1<<8)
#define CCR_DSIZ_32		(0x0 << 6)
#define CCR_DSIZ_8		(0x1 << 6)
#define CCR_DSIZ_16		(0x2 << 6)
#define CCR_SSIZ_32		(0x0 << 4)
#define CCR_SSIZ_8		(0x1 << 4)
#define CCR_SSIZ_16		(0x2 << 4)
#define CCR_REN			(1<<3)
#define CCR_RPT			(1<<2)
#define CCR_FRC			(1<<1)
#define CCR_CEN			(1<<0)
#define RTOR_EN			(1<<15)
#define RTOR_CLK		(1<<14)
#define RTOR_PSC		(1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

/*
 * struct imxdma_channel_internal - i.MX specific DMA extension
 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
 * @resbytes: total residual number of bytes to transfer
 *            (it can be lower than or equal to the sum of SG mapped chunk sizes)
 * @in_use: the channel is currently running a transfer
 * @watchdog: timer used to detect stalled hardware-chained transfers
 * @hw_chaining: nonzero if hardware descriptor chaining is used (i.MX27 only)
 *
 * Structure used for i.MX DMA processing. It would probably be good to fold
 * this into a generic DMA structure in the future for external interfacing,
 * and use struct imxdma_channel_internal only as an extension to it.
 */

struct imxdma_channel_internal {
	struct scatterlist *sg;
	unsigned int resbytes;

	int in_use;

	struct timer_list watchdog;

	int hw_chaining;
};

struct imxdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	enum dma_transfer_direction direction;
	enum imxdma_prep_type type;
	/* For memcpy and interleaved */
	unsigned int config_port;
	unsigned int config_mem;
	/* For interleaved transfers */
	unsigned int x;
	unsigned int y;
	unsigned int w;
	/* For slave sg and cyclic */
	struct scatterlist *sg;
	unsigned int sgcount;
};

struct imxdma_channel {
	struct imxdma_channel_internal internal;
	struct imxdma_engine *imxdma;
	unsigned int channel;

	struct tasklet_struct dma_tasklet;
	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;
	int descs_allocated;
	enum dma_slave_buswidth word_size;
	dma_addr_t per_address;
	u32 watermark_level;
	struct dma_chan chan;
	spinlock_t lock;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	int dma_request;
	struct scatterlist *sg_list;
	u32 ccr_from_device;
	u32 ccr_to_device;
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

/* TODO: put this inside any struct */
static void __iomem *imx_dmav1_baseaddr;
static struct clk *dma_clk;

static void imx_dmav1_writel(unsigned val, unsigned offset)
{
	__raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
	return __raw_readl(imx_dmav1_baseaddr + offset);
}

static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma)
{
	if (cpu_is_mx27())
		return imxdma->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	unsigned long now;

	now = min(imxdma->resbytes, sg->length);
	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
		imxdma->resbytes -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		 "size 0x%08x\n", imxdmac->channel,
		 imx_dmav1_readl(DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_CNTR(imxdmac->channel)));

	return now;
}

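/*
 * imxdma_enable_hw - start the transfer described by @d on its channel:
 * acknowledge and unmask the channel interrupt and set CCR_CEN. On
 * i.MX21/i.MX27 with hardware chaining, the following sg chunk is also
 * pre-loaded and auto-repeat (CCR_RPT | CCR_ACRPT) is armed.
 */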
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	if (imxdmac->internal.in_use)
		return;

	local_irq_save(flags);

	imx_dmav1_writel(1 << channel, DMA_DISR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
		CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			imxdmac->internal.sg && imxdma_hw_chain(&imxdmac->internal)) {
		imxdmac->internal.sg = sg_next(imxdmac->internal.sg);
		if (imxdmac->internal.sg) {
			u32 tmp;
			imxdma_sg_next(d, imxdmac->internal.sg);
			tmp = imx_dmav1_readl(DMA_CCR(channel));
			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
				DMA_CCR(channel));
		}
	}
	imxdmac->internal.in_use = 1;

	local_irq_restore(flags);
}

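/*
 * imxdma_disable_hw - stop a channel: mask and acknowledge its interrupt,
 * clear CCR_CEN and cancel the chaining watchdog if it was armed.
 */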
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imxdma_hw_chain(&imxdmac->internal))
		del_timer(&imxdmac->internal.watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
			DMA_CCR(channel));
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdmac->internal.in_use = 0;
	local_irq_restore(flags);
}

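/*
 * imxdma_watchdog - timer callback used with hardware chaining; fires when
 * a chained transfer stalls, disables the channel and kicks the tasklet.
 */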
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	int channel = imxdmac->channel;

	imx_dmav1_writel(0, DMA_CCR(channel));
	imxdmac->internal.in_use = 0;
	imxdmac->internal.sg = NULL;

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
}

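/*
 * imxdma_err_handler - error interrupt handler. Collects the burst timeout,
 * request timeout, transfer error and buffer overflow status registers,
 * acknowledges the offending channels and schedules their tasklets.
 */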
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(DMA_DISR);

	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
		   imx_dmav1_readl(DMA_DRTOSR) |
		   imx_dmav1_readl(DMA_DSESR)  |
		   imx_dmav1_readl(DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		internal = &imxdma->channel[i].internal;
		errcode = 0;

		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

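/*
 * dma_irq_handle_channel - per-channel end-of-transfer handling: advance to
 * the next sg chunk if one is left (re-arming the watchdog when hardware
 * chaining is used), otherwise stop the channel and schedule the tasklet.
 */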
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	if (imxdma->sg) {
		u32 tmp;
		imxdma->sg = sg_next(imxdma->sg);

		if (imxdma->sg) {

			spin_lock(&imxdmac->lock);
			if (list_empty(&imxdmac->ld_active)) {
				spin_unlock(&imxdmac->lock);
				goto out;
			}

			desc = list_first_entry(&imxdmac->ld_active,
						struct imxdma_desc,
						node);
			spin_unlock(&imxdmac->lock);

			imxdma_sg_next(desc, imxdma->sg);

			tmp = imx_dmav1_readl(DMA_CCR(chno));

			if (imxdma_hw_chain(imxdma)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdma->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdma)) {
			del_timer(&imxdma->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(0, DMA_CCR(chno));
	imxdma->in_use = 0;
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

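/*
 * dma_irq_handler - main DMA interrupt. On i.MX21/27 the error conditions
 * are checked first; then every channel flagged in DISR is serviced.
 */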
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
		     disr);

	imx_dmav1_writel(disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i)) {
			internal = &imxdma->channel[i].internal;
			dma_irq_handle_channel(&imxdma->channel[i]);
		}
	}

	return IRQ_HANDLED;
}

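/*
 * imxdma_xfer_desc - program the hardware for a descriptor (memcpy, slave_sg
 * or cyclic) and enable the channel. Returns -EINVAL for unsupported types
 * or a bad transfer direction.
 */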
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		imxdmac->internal.sg = NULL;

		imx_dmav1_writel(d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%d\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		imxdmac->internal.sg = d->sg;
		imxdmac->internal.resbytes = d->len;

		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d, d->sg);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

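/*
 * imxdma_tasklet - completion handling: run the client callback, complete
 * the cookie, recycle the finished descriptor (unless it is cyclic) and
 * start the next queued descriptor, if any.
 */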
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	dma_cookie_complete(&desc->desc);

	/* If we are dealing with a cyclic descriptor keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}

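/*
 * imxdma_control - dmaengine control entry point. DMA_TERMINATE_ALL stops
 * the channel and moves all descriptors back to the free list;
 * DMA_SLAVE_CONFIG captures the peripheral address, burst size and bus
 * width and precomputes the CCR values for both transfer directions.
 */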
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->internal.hw_chaining = 1;
		if (!imxdma_hw_chain(&imxdmac->internal))
			return -EINVAL;
		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size,
				DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}

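/*
 * imxdma_alloc_chan_resources - preallocate up to IMXDMA_MAX_CHAN_DESCRIPTORS
 * descriptors per channel and put them on the channel's free list.
 */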
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		__memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}

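/*
 * imxdma_prep_slave_sg - build a slave scatter-gather descriptor. The first
 * sg entry must be aligned to the configured bus width; the descriptor
 * length is the sum of all sg entry lengths.
 */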
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

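/*
 * imxdma_prep_dma_cyclic - build a cyclic descriptor from buf_len/period_len
 * scatterlist entries whose last entry links back to the first, so the
 * transfer wraps around to the first period after the last one completes.
 */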
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

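/*
 * imxdma_prep_dma_memcpy - build a linear memory-to-memory descriptor using
 * 32-bit accesses on both ends.
 */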
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

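/*
 * imxdma_issue_pending - if the channel is idle, move the first queued
 * descriptor to the active list and start it.
 */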
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}

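/*
 * imxdma_probe - map the DMA controller for the detected SoC (i.MX1/21/27),
 * enable its clock, reset and enable the module, request the interrupt
 * line(s), initialize the channels and register the dmaengine device.
 */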
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	if (cpu_is_mx1())
		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	else if (cpu_is_mx21())
		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	else if (cpu_is_mx27())
		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	else
		return 0;

	dma_clk = clk_get(NULL, "dma");
	if (IS_ERR(dma_clk))
		return PTR_ERR(dma_clk);
	clk_enable(dma_clk);

	/* reset DMA module */
	imx_dmav1_writel(DCR_DRST, DMA_DCR);

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register IRQ for DMA\n");
			return ret;
		}

		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, NULL);
			return ret;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];
		memset(&imxdmac->internal, 0, sizeof(imxdmac->internal));
		if (cpu_is_mx21() || cpu_is_mx27()) {
			ret = request_irq(MX2x_INT_DMACH0 + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				pr_crit("Can't register IRQ %d for DMA channel %d\n",
						MX2x_INT_DMACH0 + i, i);
				goto err_init;
			}
			init_timer(&imxdmac->internal.watchdog);
			imxdmac->internal.watchdog.function = &imxdma_watchdog;
			imxdmac->internal.watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:

	if (cpu_is_mx21() || cpu_is_mx27()) {
		while (--i >= 0)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		for (i = 0; i < IMX_DMA_CHANNELS; i++)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");