/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS	16

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)

#define DMA_DCR		0x00		/* Control Register */
#define DMA_DISR	0x04		/* Interrupt status Register */
#define DMA_DIMR	0x08		/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR	0x10		/* Request timeout Register */
#define DMA_DSESR	0x14		/* Transfer Error Status Register */
#define DMA_DBOSR	0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c		/* Burst timeout control Register */
#define DMA_WSRA	0x40		/* W-Size Register A */
#define DMA_XSRA	0x44		/* X-Size Register A */
#define DMA_YSRA	0x48		/* Y-Size Register A */
#define DMA_WSRB	0x4c		/* W-Size Register B */
#define DMA_XSRB	0x50		/* X-Size Register B */
#define DMA_YSRB	0x54		/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */

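/*
 * Each channel owns a 0x40-byte register window: (x) << 6 in the macros
 * above is x * 0x40, so e.g. DMA_SAR(0) lives at 0x80 and DMA_SAR(1) at
 * 0xc0. RTOR and BUCR intentionally share offset 0x98; which register is
 * found there depends on the i.MX variant.
 */
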
#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

/*
 * struct imxdma_channel_internal - i.MX specific DMA extension
 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
 * @resbytes: total residual number of bytes to transfer
 *            (it can be lower or same as sum of SG mapped chunk sizes)
 * @in_use: channel is currently processing a transfer
 * @ccr_from_device: precomputed CCR value for DMA_DEV_TO_MEM transfers
 * @ccr_to_device: precomputed CCR value for DMA_MEM_TO_DEV transfers
 * @watchdog: timer to detect stalled transfers when hardware chaining is used
 * @hw_chaining: whether to use the i.MX27 hardware chaining feature
 *
 * This structure holds the i.MX specific per-channel state; it is a
 * candidate for being folded into struct imxdma_channel in the future.
 */
struct imxdma_channel_internal {
	struct scatterlist *sg;
	unsigned int resbytes;

	int in_use;

	u32 ccr_from_device;
	u32 ccr_to_device;

	struct timer_list watchdog;

	int hw_chaining;
};

struct imxdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	enum dma_transfer_direction direction;
	enum imxdma_prep_type type;
	/* For memcpy and interleaved */
	unsigned int config_port;
	unsigned int config_mem;
	/* For interleaved transfers */
	unsigned int x;
	unsigned int y;
	unsigned int w;
	/* For slave sg and cyclic */
	struct scatterlist *sg;
	unsigned int sgcount;
};

struct imxdma_channel {
	struct imxdma_channel_internal internal;
	struct imxdma_engine *imxdma;
	unsigned int channel;

	struct tasklet_struct dma_tasklet;
	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;
	int descs_allocated;
	enum dma_slave_buswidth word_size;
	dma_addr_t per_address;
	u32 watermark_level;
	struct dma_chan chan;
	spinlock_t lock;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	int dma_request;
	struct scatterlist *sg_list;
};

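/*
 * Descriptor lifecycle (see imxdma_alloc_chan_resources, imxdma_tx_submit,
 * imxdma_issue_pending and imxdma_tasklet below): descriptors start out on
 * ld_free, move to ld_queue when a prepared transfer is submitted, to
 * ld_active once the hardware has been programmed, and back to ld_free on
 * completion. Cyclic descriptors stay on ld_active until the channel is
 * terminated.
 */
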
struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

/* TODO: put this inside any struct */
static void __iomem *imx_dmav1_baseaddr;
static struct clk *dma_clk;

static void imx_dmav1_writel(unsigned val, unsigned offset)
{
	__raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
	return __raw_readl(imx_dmav1_baseaddr + offset);
}

static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma)
{
	if (cpu_is_mx27())
		return imxdma->hw_chaining;
	else
		return 0;
}

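/*
 * The i.MX1/21/27 DMA controller has no hardware scatter-gather support:
 * a channel transfers one contiguous block at a time. Scatter-gather is
 * therefore emulated by reprogramming SAR/DAR/CNTR with the next chunk,
 * either from the completion interrupt or, on i.MX27, via the hardware
 * chaining bits (CCR_RPT/CCR_ACRPT) guarded by a watchdog timer.
 */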
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	unsigned long now;

	now = min(imxdma->resbytes, sg->length);
	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
		imxdma->resbytes -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		 "size 0x%08x\n", imxdmac->channel,
		 imx_dmav1_readl(DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_CNTR(imxdmac->channel)));

	return now;
}

static int
imxdma_setup_mem2mem_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address,
			unsigned int dma_length, unsigned int dev_addr)
{
	int channel = imxdmac->channel;

	imxdmac->internal.sg = NULL;

	if (!dma_address) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
		       channel);
		return -EINVAL;
	}

	if (!dma_length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
		       channel);
		return -EINVAL;
	}

	pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
		 "dev_addr=0x%08x for write\n",
		 channel, __func__, (unsigned int)dma_address,
		 dma_length, dev_addr);

	imx_dmav1_writel(dma_address, DMA_SAR(channel));
	imx_dmav1_writel(dev_addr, DMA_DAR(channel));
	imx_dmav1_writel(imxdmac->internal.ccr_to_device,
			 DMA_CCR(channel));

	imx_dmav1_writel(dma_length, DMA_CNTR(channel));

	return 0;
}

static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	if (imxdmac->internal.in_use)
		return;

	local_irq_save(flags);

	imx_dmav1_writel(1 << channel, DMA_DISR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
		CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			imxdmac->internal.sg && imxdma_hw_chain(&imxdmac->internal)) {
		imxdmac->internal.sg = sg_next(imxdmac->internal.sg);
		if (imxdmac->internal.sg) {
			u32 tmp;
			imxdma_sg_next(d, imxdmac->internal.sg);
			tmp = imx_dmav1_readl(DMA_CCR(channel));
			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}
	imxdmac->internal.in_use = 1;

	local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imxdma_hw_chain(&imxdmac->internal))
		del_timer(&imxdmac->internal.watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
			 DMA_CCR(channel));
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdmac->internal.in_use = 0;
	local_irq_restore(flags);
}

static int
imxdma_config_channel_hw(struct imxdma_channel *imxdmac, unsigned int config_port,
	unsigned int config_mem, unsigned int dmareq, int hw_chaining)
{
	int channel = imxdmac->channel;
	u32 dreq = 0;

	imxdmac->internal.hw_chaining = 0;

	if (hw_chaining) {
		imxdmac->internal.hw_chaining = 1;
		if (!imxdma_hw_chain(&imxdmac->internal))
			return -EINVAL;
	}

	if (dmareq)
		dreq = CCR_REN;

	imxdmac->internal.ccr_from_device = config_port | (config_mem << 2) | dreq;
	imxdmac->internal.ccr_to_device = config_mem | (config_port << 2) | dreq;

	imx_dmav1_writel(dmareq, DMA_RSSR(channel));

	return 0;
}

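/*
 * config_port and config_mem above are IMX_DMA_MEMSIZE_* and IMX_DMA_TYPE_*
 * values, i.e. encoded at the CCR source-side positions (SSIZ at bit 4,
 * SMOD at bit 10). Shifting one of them left by 2 moves it into the
 * destination fields (DSIZ at bit 6, DMOD at bit 12), so a single pair of
 * values yields both ccr_from_device (device is source) and ccr_to_device
 * (device is destination). For example, a 16-bit FIFO device and 32-bit
 * linear memory give ccr_to_device == CCR_DMOD_FIFO | CCR_DSIZ_16 |
 * CCR_SMOD_LINEAR | CCR_SSIZ_32, plus CCR_REN when a DMA request is used.
 */
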
static int
imxdma_setup_sg_hw(struct imxdma_desc *d,
		   struct scatterlist *sg, unsigned int sgcount,
		   unsigned int dma_length, unsigned int dev_addr,
		   enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int channel = imxdmac->channel;

	if (imxdmac->internal.in_use)
		return -EBUSY;

	imxdmac->internal.sg = sg;
	imxdmac->internal.resbytes = dma_length;

	if (!sg || !sgcount) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
		       channel);
		return -EINVAL;
	}

	if (!sg->length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
		       channel);
		return -EINVAL;
	}

	if (direction == DMA_DEV_TO_MEM) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			 "dev_addr=0x%08x for read\n",
			 channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel));
	} else if (direction == DMA_MEM_TO_DEV) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			 "dev_addr=0x%08x for write\n",
			 channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_to_device, DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imxdma_sg_next(d, sg);

	return 0;
}

static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	int channel = imxdmac->channel;

	imx_dmav1_writel(0, DMA_CCR(channel));
	imxdmac->internal.in_use = 0;
	imxdmac->internal.sg = NULL;

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
}

static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(DMA_DISR);

	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
		   imx_dmav1_readl(DMA_DRTOSR) |
		   imx_dmav1_readl(DMA_DSESR) |
		   imx_dmav1_readl(DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA error on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

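/*
 * Per-channel completion handling: while the emulated scatterlist still
 * has chunks left, program the next one (just refreshing the watchdog when
 * hardware chaining already queued it, restarting the channel otherwise).
 * Only when the list is exhausted is the channel stopped and the tasklet
 * left to complete the descriptor.
 */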
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	if (imxdma->sg) {
		u32 tmp;
		imxdma->sg = sg_next(imxdma->sg);

		if (imxdma->sg) {

			spin_lock(&imxdmac->lock);
			if (list_empty(&imxdmac->ld_active)) {
				spin_unlock(&imxdmac->lock);
				goto out;
			}

			desc = list_first_entry(&imxdmac->ld_active,
						struct imxdma_desc,
						node);
			spin_unlock(&imxdmac->lock);

			imxdma_sg_next(desc, imxdma->sg);

			tmp = imx_dmav1_readl(DMA_CCR(chno));

			if (imxdma_hw_chain(imxdma)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdma->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdma)) {
			del_timer(&imxdma->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(0, DMA_CCR(chno));
	imxdma->in_use = 0;
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", disr);

	imx_dmav1_writel(disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int ret;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		ret = imxdma_config_channel_hw(imxdmac,
					       d->config_port, d->config_mem, 0, 0);
		if (ret < 0)
			return ret;
		ret = imxdma_setup_mem2mem_hw(imxdmac, d->src, d->len, d->dest);
		if (ret < 0)
			return ret;
		break;

	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		ret = imxdma_setup_sg_hw(d, d->sg, d->sgcount, d->len,
					 imxdmac->per_address, d->direction);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	dma_cookie_complete(&desc->desc);

	/* If we are dealing with a cyclic descriptor keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}

static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imxdma_config_channel_hw(imxdmac,
					       mode | IMX_DMA_TYPE_FIFO,
					       IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
					       imxdmac->dma_request, 1);

		if (ret)
			return ret;
		/* Set burst length: maxburst is in words, BLR takes bytes */
		imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size,
				 DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

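/*
 * Sketch of how a slave channel is configured from a client driver (the
 * FIFO address and burst size here are made up for illustration):
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst   = 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * This arrives above as DMA_SLAVE_CONFIG and programs a 16-bit FIFO read
 * with a 16-byte burst (8 words of 2 bytes).
 */
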
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	/* Move the prepared descriptor from ld_free onto the submit queue */
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		/* kzalloc already returns zeroed memory */
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM)
		desc->src = imxdmac->per_address;
	else
		desc->dest = imxdmac->per_address;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

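/*
 * Typical call sequence from a client (sketch, not taken from this file;
 * my_callback is a hypothetical completion handler):
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM, 0);
 *	txd->callback = my_callback;
 *	dmaengine_submit(txd);			(-> imxdma_tx_submit)
 *	dma_async_issue_pending(chan);		(-> imxdma_issue_pending)
 */
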
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM)
		desc->src = imxdmac->per_address;
	else
		desc->dest = imxdmac->per_address;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

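/*
 * The cyclic case reuses the slave-sg machinery: one scatterlist entry is
 * built per period and the list is terminated with a chain entry pointing
 * back to the first one (the page_link trick above), while
 * desc->len = IMX_DMA_LENGTH_LOOP keeps imxdma_sg_next() from ever
 * decrementing the residue, so the transfer wraps around until the channel
 * is terminated.
 */
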
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%08x dst=0x%08x len=%zu\n",
			__func__, imxdmac->channel, (unsigned int)src,
			(unsigned int)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

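/*
 * Both ends of a memcpy are 32-bit linear memory, which is why the probe
 * code below advertises copy_align = 2 (i.e. 4-byte alignment) for the
 * source, destination and length of DMA_MEMCPY transfers.
 */
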
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}

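/*
 * If the channel is already running, a newly issued descriptor simply
 * stays on ld_queue; imxdma_tasklet() starts it when the current transfer
 * completes. issue_pending therefore only has to kick an idle channel.
 */
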
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	if (cpu_is_mx1())
		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	else if (cpu_is_mx21())
		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	else if (cpu_is_mx27())
		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	else
		return -ENODEV;

	dma_clk = clk_get(NULL, "dma");
	if (IS_ERR(dma_clk))
		return PTR_ERR(dma_clk);
	clk_enable(dma_clk);

	/* reset DMA module */
	imx_dmav1_writel(DCR_DRST, DMA_DCR);

	/* allocate the engine before any IRQ is requested with it as dev_id */
	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register IRQ for DMA\n");
			kfree(imxdma);
			return ret;
		}

		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, imxdma);
			kfree(imxdma);
			return ret;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];
		memset(&imxdmac->internal, 0, sizeof(imxdmac->internal));
		if (cpu_is_mx21() || cpu_is_mx27()) {
			ret = request_irq(MX2x_INT_DMACH0 + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				pr_crit("Can't register IRQ %d for DMA channel %d\n",
						MX2x_INT_DMACH0 + i, i);
				goto err_init;
			}
			init_timer(&imxdmac->internal.watchdog);
			imxdmac->internal.watchdog.function = &imxdma_watchdog;
			imxdmac->internal.watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:

	if (cpu_is_mx21() || cpu_is_mx27()) {
		while (--i >= 0)
			free_irq(MX2x_INT_DMACH0 + i, imxdma);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, imxdma);
		free_irq(MX1_DMA_ERR, imxdma);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		for (i = 0; i < IMX_DMA_CHANNELS; i++)
			free_irq(MX2x_INT_DMACH0 + i, imxdma);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, imxdma);
		free_irq(MX1_DMA_ERR, imxdma);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");