/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Authors:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   fits the MPC8560, MPC8555, MPC8548, MPC8641, and others. Support for
 *   the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include "dmaengine.h"
#include "fsldma.h"
#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";
static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}
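/*
 * Descriptor Helpers
 *
 * CPU_TO_DMA()/DMA_TO_CPU() convert link-descriptor fields between CPU and
 * controller byte order, so the descriptors in memory always match what the
 * hardware expects to walk.
 */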
static void set_desc_cnt(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	return DMA_TO_CPU(chan, desc->hw.count, 32);
}
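/*
 * On the 85xx controllers the upper 32 bits of a descriptor's 64-bit
 * source/destination address carry the SATR/DATR attribute field; the snoop
 * bits set below make the transfer cache-coherent. The 83xx controllers do
 * not use these attribute bits, so the mask is zero there.
 */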
static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}
static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}
/*
 * DMA Engine Hardware Control Helpers
 */
static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}
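/*
 * dma_is_idle() below treats the channel as idle when the Channel Busy (CB)
 * status bit is clear, or once the channel has halted (CH set) after an
 * abort or error.
 */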
static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
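/*
 * Note: MR[EMP_EN] enables external pause control (DREQ#) and MR[EMS_EN]
 * enables external master start. When external start is disabled, setting
 * MR[CS] in dma_start() above kicks the transfer off immediately.
 */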
static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}
/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the DMA channel
 * transfers data from the source address (SA), a loop size of 4 makes it
 * read from SA, SA + 1, SA + 2, SA + 3, then wrap back to SA, SA + 1, and
 * so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the DMA
 * channel transfers data to the destination address (TA), a loop size of 4
 * makes it write to TA, TA + 1, TA + 2, TA + 3, then wrap back to TA,
 * TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
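/*
 * Example: a source-side loop size of 4 is programmed as
 * FSL_DMA_MR_SAHE | (__ilog2(4) << 14), i.e. the power-of-two exponent is
 * written into the source-address-hold transfer-size field of the mode
 * register; the destination side uses the equivalent field at bit 16.
 */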
/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an external
 * DMA start pin, so dma_start() does not begin the transfer immediately.
 * The DMA channel will wait for the control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}
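/*
 * Note: append_ld_queue() maintains two views of the same chain: the
 * hardware view (the next_ln_addr links walked by the controller) and the
 * software view (the ld_pending list). Keeping the two consistent is why
 * callers must hold chan->desc_lock.
 */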
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EINVAL;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}
/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return: the allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}
/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return: the number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes to meet the
	 * FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}
/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}
/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}
static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
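/*
 * Usage sketch (illustrative only, not part of this driver): a dmaengine
 * client would typically prepare a memcpy through the hook above, submit
 * it, and then kick the pending queue:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */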
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 * @context: transaction context (ignored)
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	/*
	 * This operation is not supported on the Freescale DMA controller.
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}
static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Halt the DMA engine */
		dma_halt(chan);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
		chan->idle = true;

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_MEM_TO_DEV)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}
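/*
 * Note: FSLDMA_EXTERNAL_START is a controller-specific command in the
 * generic dma_ctrl_cmd enum, implemented only by this driver; a client that
 * issues it is explicitly tied to this hardware's external-start capability.
 */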
/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	struct device *dev = chan->common.device->dev;
	dma_addr_t src = get_desc_src(chan, desc);
	dma_addr_t dst = get_desc_dst(chan, desc);
	u32 len = get_desc_cnt(chan, desc);

	/* Run the link descriptor callback function */
	if (txd->callback) {
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p callback\n", desc);
#endif
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	/* Unmap the dst buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
	}

	/* Unmap the src buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
	}

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p free\n", desc);
#endif
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}
/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = DMA_IN(chan, &chan->regs->mr, 32);
		mode &= ~FSL_DMA_MR_CS;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}
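/*
 * During operation, chan->idle is written only under chan->desc_lock: it is
 * cleared above when a transfer is started, and set again by the tasklet
 * (or by a DMA_TERMINATE_ALL) once the hardware has stopped.
 */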
/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}
/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/
static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 *
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer hit end-of-transfer, we should
	 * clear the Channel Start bit to prepare for the next
	 * transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}
static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	struct fsl_desc_sw *desc, *_desc;
	LIST_HEAD(ld_cleanup);
	unsigned long flags;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->ld_running)) {
		dma_cookie_t cookie;

		desc = to_fsl_desc(chan->ld_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		chan_dbg(chan, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {

		/* Remove from the list of transactions */
		list_del(&desc->node);

		/* Run all cleanup for this descriptor */
		fsldma_cleanup_descriptor(chan, desc);
	}

	chan_dbg(chan, "tasklet exit\n");
}
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	/* the general status register packs one status byte per channel,
	 * most-significant byte first */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}
static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}
static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}
/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/
static int fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: the 85xx also supports the 83xx callbacks */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	chan->idle = true;

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}
static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}
static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "Not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}
static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}
static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};
/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/
static __init int fsldma_init(void)
{
	pr_info("Freescale Elo / Elo Plus DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);
MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");