/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from the MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as an OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory
 * transfers are supported (tested using the dmatest module).
 */
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <linux/random.h>
/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_CHANNELS	64
#define MPC_DMA_TCD_OFFSET	0x1000
/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err)	(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)
#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05
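/*
 * The TSIZE values above encode the per-access transfer width
 * (1, 2, 4, 16 or 32 bytes) programmed into the TCD ssize/dsize fields below.
 */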
/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
        u32 dmacr;		/* DMA control register */
        u32 dmaes;		/* DMA error status */
        u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
        u32 dmaerql;		/* DMA enable request low(channels 31~0) */
        u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
        u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
        u8 dmaserq;		/* DMA set enable request */
        u8 dmacerq;		/* DMA clear enable request */
        u8 dmaseei;		/* DMA set enable error interrupt */
        u8 dmaceei;		/* DMA clear enable error interrupt */
        u8 dmacint;		/* DMA clear interrupt request */
        u8 dmacerr;		/* DMA clear error */
        u8 dmassrt;		/* DMA set start bit */
        u8 dmacdne;		/* DMA clear DONE status bit */
        u32 dmainth;		/* DMA interrupt request high(ch63~32) */
        u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
        u32 dmaerrh;		/* DMA error high(ch63~32) */
        u32 dmaerrl;		/* DMA error low(ch31~0) */
        u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
        u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
        u32 dmaihsa;		/* DMA interrupt high select AXE(ch63~32) */
        u32 dmagpor;		/* (General purpose register on MPC8308) */
        u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
        u32 reserve0[48];	/* Reserved */
        u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels(0~63) priority */
};
struct __attribute__ ((__packed__)) mpc_dma_tcd {
        u32 saddr;		/* Source address */

        u32 smod:5;		/* Source address modulo */
        u32 ssize:3;		/* Source data transfer size */
        u32 dmod:5;		/* Destination address modulo */
        u32 dsize:3;		/* Destination data transfer size */
        u32 soff:16;		/* Signed source address offset */

        u32 nbytes;		/* Inner "minor" byte count */
        u32 slast;		/* Last source address adjustment */
        u32 daddr;		/* Destination address */

        u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
        u32 citer_linkch:6;	/* Link channel for minor loop complete */
        u32 citer:9;		/* Current "major" iteration count */
        u32 doff:16;		/* Signed destination address offset */

        u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

        u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
        u32 biter:9;		/* Beginning "major" iteration count */
        u32 bwc:2;		/* Bandwidth control */
        u32 major_linkch:6;	/* Link channel number */
        u32 done:1;		/* Channel done */
        u32 active:1;		/* Channel active */
        u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
        u32 e_sg:1;		/* Enable scatter/gather processing */
        u32 d_req:1;		/* Disable request */
        u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
        u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
        u32 start:1;		/* Channel start */
};
struct mpc_dma_desc {
        struct dma_async_tx_descriptor	desc;
        struct mpc_dma_tcd		*tcd;
        dma_addr_t			tcd_paddr;
        struct list_head		node;
};
struct mpc_dma_chan {
        struct dma_chan			chan;
        struct list_head		free;
        struct list_head		prepared;
        struct list_head		queued;
        struct list_head		active;
        struct list_head		completed;
        struct mpc_dma_tcd		*tcd;
        dma_addr_t			tcd_paddr;

        /* Lock for this structure */
        spinlock_t			lock;
};
struct mpc_dma {
        struct dma_device		dma;
        struct tasklet_struct		tasklet;
        struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
        struct mpc_dma_regs __iomem	*regs;
        struct mpc_dma_tcd __iomem	*tcd;

        /* Lock for error_status field in this structure */
        spinlock_t			error_status_lock;
};
#define DRV_NAME	"mpc512x_dma"
/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct mpc_dma_chan, chan);
}
/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

        return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
        struct mpc_dma_desc *first = NULL;
        struct mpc_dma_desc *prev = NULL;
        struct mpc_dma_desc *mdesc;
        int cid = mchan->chan.chan_id;

        /* Move all queued descriptors to active list */
        list_splice_tail_init(&mchan->queued, &mchan->active);

        /* Chain descriptors into one transaction */
        list_for_each_entry(mdesc, &mchan->active, node) {
                if (!first) {
                        first = mdesc;
                } else {
                        /* Link the previous TCD to this one via scatter/gather */
                        prev->tcd->dlast_sga = mdesc->tcd_paddr;
                        prev->tcd->e_sg = 1;
                        mdesc->tcd->start = 1;
                }

                prev = mdesc;
        }

        /* Only the last descriptor in the chain raises a completion interrupt */
        prev->tcd->int_maj = 1;
        /* Send first descriptor in chain into hardware */
        memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
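        /*
         * With e_sg set, the controller reloads the next chained TCD from
         * dlast_sga once the current one finishes.
         */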
        mdma->tcd[cid].e_sg = 1;
        out_8(&mdma->regs->dmassrt, cid);
}
/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        u32 status = is | es;
        int ch;
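        /*
         * Scan pending channels from the highest set bit down; fls() returns
         * the 1-based position of the top bit, or 0 when none are left.
         */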
        while ((ch = fls(status) - 1) >= 0) {
                status &= ~(1 << ch);
                mchan = &mdma->channels[ch + off];

                spin_lock(&mchan->lock);

                out_8(&mdma->regs->dmacint, ch + off);
                out_8(&mdma->regs->dmacerr, ch + off);

                /* Check error status */
                list_for_each_entry(mdesc, &mchan->active, node)

                /* Execute queued descriptors */
                list_splice_tail_init(&mchan->active, &mchan->completed);
                if (!list_empty(&mchan->queued))
                        mpc_dma_execute(mchan);

                spin_unlock(&mchan->lock);
        }
}
/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
        struct mpc_dma *mdma = data;
        u32 es;

        /* Save error status register */
        es = in_be32(&mdma->regs->dmaes);
        spin_lock(&mdma->error_status_lock);
        if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
                mdma->error_status = es;
        spin_unlock(&mdma->error_status_lock);
        /* Handle interrupt on each channel */
        if (mdma->dma.chancnt > 32) {
                mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
                                        in_be32(&mdma->regs->dmaerrh), 32);
        }
        mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
                                in_be32(&mdma->regs->dmaerrl), 0);

        /* Schedule tasklet */
        tasklet_schedule(&mdma->tasklet);

        return IRQ_HANDLED;
}
/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
        dma_cookie_t last_cookie = 0;
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        struct dma_async_tx_descriptor *desc;
        unsigned long flags;
        LIST_HEAD(list);
        int i;

        for (i = 0; i < mdma->dma.chancnt; i++) {
                mchan = &mdma->channels[i];

                /* Get all completed descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                if (!list_empty(&mchan->completed))
                        list_splice_tail_init(&mchan->completed, &list);
                spin_unlock_irqrestore(&mchan->lock, flags);

                if (list_empty(&list))
                        continue;

                /* Execute callbacks and run dependencies */
                list_for_each_entry(mdesc, &list, node) {
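                        /*
                         * The channel lock is not held here, so completion
                         * callbacks may safely submit new descriptors.
                         */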
                        desc = &mdesc->desc;

                        if (desc->callback)
                                desc->callback(desc->callback_param);

                        last_cookie = desc->cookie;
                        dma_run_dependencies(desc);
                }
                /* Free descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                list_splice_tail_init(&list, &mchan->free);
                mchan->chan.completed_cookie = last_cookie;
                spin_unlock_irqrestore(&mchan->lock, flags);
        }
}
/* DMA tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
        struct mpc_dma *mdma = (void *)data;
        unsigned long flags;
        u32 es;

        spin_lock_irqsave(&mdma->error_status_lock, flags);
        es = mdma->error_status;
        mdma->error_status = 0;
        spin_unlock_irqrestore(&mdma->error_status_lock, flags);
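        /*
         * Work on a local snapshot of the latched error status; clearing it
         * under the lock lets the interrupt handler latch the next error.
         */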
        /* Print nice error report */
        if (es) {
                dev_err(mdma->dma.dev,
                        "Hardware reported following error(s) on channel %u:\n",
                        MPC_DMA_DMAES_ERRCHN(es));
                if (es & MPC_DMA_DMAES_GPE)
                        dev_err(mdma->dma.dev, "- Group Priority Error\n");
                if (es & MPC_DMA_DMAES_CPE)
                        dev_err(mdma->dma.dev, "- Channel Priority Error\n");
                if (es & MPC_DMA_DMAES_SAE)
                        dev_err(mdma->dma.dev, "- Source Address Error\n");
                if (es & MPC_DMA_DMAES_SOE)
                        dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
                if (es & MPC_DMA_DMAES_DAE)
                        dev_err(mdma->dma.dev, "- Destination Address Error\n");
                if (es & MPC_DMA_DMAES_DOE)
                        dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
                if (es & MPC_DMA_DMAES_NCE)
                        dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
                if (es & MPC_DMA_DMAES_SGE)
                        dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
                if (es & MPC_DMA_DMAES_SBE)
                        dev_err(mdma->dma.dev, "- Source Bus Error\n");
                if (es & MPC_DMA_DMAES_DBE)
                        dev_err(mdma->dma.dev, "- Destination Bus Error\n");
        }
        mpc_dma_process_completed(mdma);
}
/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
        struct mpc_dma_desc *mdesc;
        unsigned long flags;
        dma_cookie_t cookie;

        mdesc = container_of(txd, struct mpc_dma_desc, desc);
        spin_lock_irqsave(&mchan->lock, flags);

        /* Move descriptor to queue */
        list_move_tail(&mdesc->node, &mchan->queued);
        /* If channel is idle, execute all queued descriptors */
        if (list_empty(&mchan->active))
                mpc_dma_execute(mchan);
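        /* Assign the next cookie to this descriptor and advance the channel's cookie counter */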
        cookie = mchan->chan.cookie + 1;
        if (cookie <= 0)
                cookie = 1;

        mchan->chan.cookie = cookie;
        mdesc->desc.cookie = cookie;

        spin_unlock_irqrestore(&mchan->lock, flags);

        return cookie;
}
/* Allocate channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);
        int i;

        /* Alloc DMA memory for Transfer Control Descriptors */
        tcd = dma_alloc_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                        &tcd_paddr, GFP_KERNEL);
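        /*
         * TCDs sit in coherent DMA memory: the engine fetches chained
         * descriptors from these physical addresses via dlast_sga.
         */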
        /* Alloc descriptors for this channel */
        for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
                mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
                if (!mdesc) {
                        dev_notice(mdma->dma.dev,
                                "Memory allocation error. Allocated only %u descriptors\n", i);
                        break;
                }

                dma_async_tx_descriptor_init(&mdesc->desc, chan);
                mdesc->desc.flags = DMA_CTRL_ACK;
                mdesc->desc.tx_submit = mpc_dma_tx_submit;

                mdesc->tcd = &tcd[i];
                mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

                list_add_tail(&mdesc->node, &descs);
        }
        /* Return error only if no descriptors were allocated */
        if (i == 0) {
                dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                        tcd, tcd_paddr);
                return -ENOMEM;
        }
        spin_lock_irqsave(&mchan->lock, flags);
        mchan->tcd = tcd;
        mchan->tcd_paddr = tcd_paddr;
        list_splice_tail_init(&descs, &mchan->free);
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Enable Error Interrupt */
        out_8(&mdma->regs->dmaseei, chan->chan_id);

        return 0;
}
/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc, *tmp;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Channel must be idle */
        BUG_ON(!list_empty(&mchan->prepared));
        BUG_ON(!list_empty(&mchan->queued));
        BUG_ON(!list_empty(&mchan->active));
        BUG_ON(!list_empty(&mchan->completed));

        list_splice_tail_init(&mchan->free, &descs);
        tcd = mchan->tcd;
        tcd_paddr = mchan->tcd_paddr;

        spin_unlock_irqrestore(&mchan->lock, flags);
        /* Free DMA memory used by descriptors */
        dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                        tcd, tcd_paddr);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node)
                kfree(mdesc);

        /* Disable Error Interrupt */
        out_8(&mdma->regs->dmaceei, chan->chan_id);
}
/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
        /*
         * We are posting descriptors to the hardware as soon as
         * they are ready, so this function does nothing.
         */
}
/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                struct dma_tx_state *txstate)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        unsigned long flags;
        dma_cookie_t last_used;
        dma_cookie_t last_complete;

        spin_lock_irqsave(&mchan->lock, flags);
        last_used = mchan->chan.cookie;
        last_complete = mchan->chan.completed_cookie;
        spin_unlock_irqrestore(&mchan->lock, flags);

        dma_set_tx_state(txstate, last_complete, last_used, 0);
        return dma_async_is_complete(cookie, last_complete, last_used);
}
/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc = NULL;
        struct mpc_dma_tcd *tcd;
        unsigned long iflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, iflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
                                        node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, iflags);

        if (!mdesc) {
                /* try to free completed descriptors */
                mpc_dma_process_completed(mdma);
                return NULL;
        }
        tcd = mdesc->tcd;

        /* Prepare Transfer Control Descriptor for this transaction */
        memset(tcd, 0, sizeof(struct mpc_dma_tcd));
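        /* Pick the widest access size that src, dst and len are all aligned to */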
        if (IS_ALIGNED(src | dst | len, 32)) {
                tcd->ssize = MPC_DMA_TSIZE_32;
                tcd->dsize = MPC_DMA_TSIZE_32;
        } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
                /* MPC8308 doesn't support 16 byte transfers */
                tcd->ssize = MPC_DMA_TSIZE_16;
                tcd->dsize = MPC_DMA_TSIZE_16;
        } else if (IS_ALIGNED(src | dst | len, 4)) {
                tcd->ssize = MPC_DMA_TSIZE_4;
                tcd->dsize = MPC_DMA_TSIZE_4;
        } else if (IS_ALIGNED(src | dst | len, 2)) {
                tcd->ssize = MPC_DMA_TSIZE_2;
                tcd->dsize = MPC_DMA_TSIZE_2;
        } else {
                tcd->ssize = MPC_DMA_TSIZE_1;
                tcd->dsize = MPC_DMA_TSIZE_1;
        }
        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, iflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, iflags);

        return &mdesc->desc;
}
static int __devinit mpc_dma_probe(struct platform_device *op)
{
        struct device_node *dn = op->dev.of_node;
        struct device *dev = &op->dev;
        struct dma_device *dma;
        struct mpc_dma *mdma;
        struct mpc_dma_chan *mchan;
        struct resource res;
        ulong regs_start, regs_size;
        int retval, i;

        mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
        if (!mdma) {
                dev_err(dev, "Memory exhausted!\n");
                return -ENOMEM;
        }
        mdma->irq = irq_of_parse_and_map(dn, 0);
        if (mdma->irq == NO_IRQ) {
                dev_err(dev, "Error mapping IRQ!\n");
                return -EINVAL;
        }

        if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
                mdma->is_mpc8308 = 1;
                mdma->irq2 = irq_of_parse_and_map(dn, 1);
                if (mdma->irq2 == NO_IRQ) {
                        dev_err(dev, "Error mapping IRQ!\n");
                        return -EINVAL;
                }
        }
        retval = of_address_to_resource(dn, 0, &res);
        if (retval) {
                dev_err(dev, "Error parsing memory region!\n");
                return retval;
        }

        regs_start = res.start;
        regs_size = resource_size(&res);
        if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
                dev_err(dev, "Error requesting memory region!\n");
                return -EBUSY;
        }

        mdma->regs = devm_ioremap(dev, regs_start, regs_size);
        if (!mdma->regs) {
                dev_err(dev, "Error mapping memory region!\n");
                return -ENOMEM;
        }
        mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
                                                + MPC_DMA_TCD_OFFSET);
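        /* The per-channel TCD array is mapped MPC_DMA_TCD_OFFSET (0x1000) bytes into the register block */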
        retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
                                                                mdma);
        if (retval) {
                dev_err(dev, "Error requesting IRQ!\n");
                return retval;
        }

        if (mdma->is_mpc8308) {
                retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
                                                        DRV_NAME, mdma);
                if (retval) {
                        dev_err(dev, "Error requesting IRQ2!\n");
                        return retval;
                }
        }
        spin_lock_init(&mdma->error_status_lock);

        dma = &mdma->dma;
        dma->dev = dev;
        if (!mdma->is_mpc8308)
                dma->chancnt = MPC_DMA_CHANNELS;
        else
                dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
        dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
        dma->device_free_chan_resources = mpc_dma_free_chan_resources;
        dma->device_issue_pending = mpc_dma_issue_pending;
        dma->device_tx_status = mpc_dma_tx_status;
        dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;

        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_MEMCPY, dma->cap_mask);
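        /* Only DMA_MEMCPY is advertised; the driver performs no slave/peripheral transfers */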
        for (i = 0; i < dma->chancnt; i++) {
                mchan = &mdma->channels[i];

                mchan->chan.device = dma;
                mchan->chan.cookie = 1;
                mchan->chan.completed_cookie = mchan->chan.cookie;

                INIT_LIST_HEAD(&mchan->free);
                INIT_LIST_HEAD(&mchan->prepared);
                INIT_LIST_HEAD(&mchan->queued);
                INIT_LIST_HEAD(&mchan->active);
                INIT_LIST_HEAD(&mchan->completed);

                spin_lock_init(&mchan->lock);
                list_add_tail(&mchan->chan.device_node, &dma->channels);
        }
        tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
        /*
         * Configure DMA Engine:
         * - Round-robin group arbitration,
         * - Round-robin channel arbitration.
         */
        if (!mdma->is_mpc8308) {
                out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
                                MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

                /* Disable hardware DMA requests */
                out_be32(&mdma->regs->dmaerqh, 0);
                out_be32(&mdma->regs->dmaerql, 0);

                /* Disable error interrupts */
                out_be32(&mdma->regs->dmaeeih, 0);
                out_be32(&mdma->regs->dmaeeil, 0);

                /* Clear interrupts status */
                out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

                /* Route interrupts to IPIC */
                out_be32(&mdma->regs->dmaihsa, 0);
                out_be32(&mdma->regs->dmailsa, 0);
        } else {
                /* MPC8308 has 16 channels and lacks some registers */
                out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

                /* enable snooping */
                out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
                /* Disable error interrupts */
                out_be32(&mdma->regs->dmaeeil, 0);

                /* Clear interrupts status */
                out_be32(&mdma->regs->dmaintl, 0xFFFF);
                out_be32(&mdma->regs->dmaerrl, 0xFFFF);
        }
        /* Register DMA engine */
        dev_set_drvdata(dev, mdma);
        retval = dma_async_device_register(dma);
        if (retval) {
                devm_free_irq(dev, mdma->irq, mdma);
                irq_dispose_mapping(mdma->irq);
        }

        return retval;
}
static int __devexit mpc_dma_remove(struct platform_device *op)
{
        struct device *dev = &op->dev;
        struct mpc_dma *mdma = dev_get_drvdata(dev);

        dma_async_device_unregister(&mdma->dma);
        devm_free_irq(dev, mdma->irq, mdma);
        irq_dispose_mapping(mdma->irq);

        return 0;
}
static struct of_device_id mpc_dma_match[] = {
        { .compatible = "fsl,mpc5121-dma", },
        {},
};
static struct platform_driver mpc_dma_driver = {
        .probe		= mpc_dma_probe,
        .remove		= __devexit_p(mpc_dma_remove),
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
                .of_match_table	= mpc_dma_match,
        },
};
module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");