/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16
struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	int				cyclic;
	int				absync;
	int				pset_nr;
	int				processed;
	struct edmacc_param		pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	int				ctlr;
	struct dma_device		dma_slave;
	struct edma_chan		slave_chans[EDMA_CHANS];
	int				dummy_slot;
};
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
	*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	/* If we have processed all psets or the channel is not started yet */
	if (!echan->edesc ||
	    echan->edesc->pset_nr == echan->edesc->processed) {
		/* Get next vdesc */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc) {
			echan->edesc = NULL;
			return;
		}
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j]);
		dev_vdbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
			"  cidx\t%08x\n"
			"  lkrld\t%08x\n",
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].opt,
			edesc->pset[j].src,
			edesc->pset[j].dst,
			edesc->pset[j].a_b_cnt,
			edesc->pset[j].ccnt,
			edesc->pset[j].src_dst_bidx,
			edesc->pset[j].src_dst_cidx,
			edesc->pset[j].link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions,
	 * set up a link to the dummy slot; this results in all future
	 * events being absorbed, and that's OK because we're done.
	 * For a cyclic transfer, link back to the start instead so
	 * events keep flowing.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(echan->slot[nslots-1], echan->slot[1]);
		else
			edma_link(echan->slot[nslots-1],
				  echan->ecc->dummy_slot);
	}

	if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
		edma_start(echan->ch_num);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan->ch_num);
	}

	/*
	 * This happens due to setup times between intermediate transfers
	 * in long SG lists which have to be broken up into transfers of
	 * MAX_NR_SG elements.
	 */
	if (echan->missed) {
		dev_dbg(dev, "missed event in execute detected\n");
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	}
}
static int edma_terminate_all(struct edma_chan *echan)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_terminate_all() returns (even if it does, it will
	 * see echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		echan->edesc = NULL;
		edma_stop(echan->ch_num);
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}
static int edma_slave_config(struct edma_chan *echan,
	struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
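
/*
 * Illustrative client-side sketch (not part of this driver): a slave
 * peripheral driver would typically fill in a dma_slave_config and hand
 * it to this callback through the generic dmaengine_slave_config()
 * wrapper. The FIFO address and widths below are hypothetical.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,	// hypothetical
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 */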
static int edma_dma_pause(struct edma_chan *echan)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!echan->edesc->cyclic)
		return -EINVAL;

	edma_pause(echan->ch_num);
	return 0;
}

static int edma_dma_resume(struct edma_chan *echan)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!echan->edesc->cyclic)
		return -EINVAL;

	edma_resume(echan->ch_num);
	return 0;
}
static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	int ret = 0;
	struct dma_slave_config *config;
	struct edma_chan *echan = to_edma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		edma_terminate_all(echan);
		break;
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = edma_slave_config(echan, config);
		break;
	case DMA_PAUSE:
		ret = edma_dma_pause(echan);
		break;
	case DMA_RESUME:
		ret = edma_dma_resume(echan);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: Width of the device's FIFO accesses
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
	enum dma_slave_buswidth dev_width, unsigned int dma_length,
	enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: in A-sync transfers only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the approach adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K-1. This
		 * is assured because bcntrld is set to 0xffff at the
		 * end of this function.
		 */
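		/*
		 * Worked example (illustrative, not from the original
		 * sources): with acnt = 4 and dma_length = 1 MiB,
		 * dma_length / acnt = 262144 elements, so
		 * ccnt = 262144 / 65535 = 4 and
		 * bcnt = 262144 - 4 * 65535 = 4; bcnt is non-zero,
		 * so ccnt is bumped to 5 below.
		 */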
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
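		/*
		 * Worked example (illustrative): with acnt = 4 and
		 * burst = 8, each frame moves 32 bytes, so one PaRAM
		 * set covers at most 32 * (SZ_64K - 1) = 2097120 bytes,
		 * just under 2 MiB per SG segment.
		 */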
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		pset->opt |= SYNCDIM;

	pset->src = src_addr;
	pset->dst = dst_addr;

	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	pset->a_b_cnt = bcnt << 16 | acnt;
	pset->ccnt = ccnt;
	/*
	 * The only time (bcntrld) auto reload is required is the
	 * A-sync case, and then only a reload value of SZ_64K-1 is
	 * needed. 'link' is initially set to NULL and will later be
	 * populated by edma_execute.
	 */
	pset->link_bcntrld = 0xffffffff;
	return absync;
}
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n",
			__func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;

		/*
		 * If this is the last in the current SG set of
		 * transactions, enable interrupts so that the next
		 * set is processed.
		 */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
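
/*
 * Illustrative client-side sketch (not part of this driver): after
 * configuring the channel, a peripheral driver maps its buffer into a
 * scatterlist and prepares/submits the transfer through the generic
 * dmaengine wrappers. All names below are hypothetical.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	sg_init_one(&sg, buf, len);
 *	dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE);
 *	txd = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	txd->callback = xfer_done;		// hypothetical callback
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */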
struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);

	if (unlikely(!echan || !len))
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = 1;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	/*
	 * Enable intermediate transfer chaining to re-trigger channel
	 * on completion of every TR, and enable transfer-completion
	 * interrupt on completion of the whole transfer.
	 */
	edesc->pset[0].opt |= ITCCHEN;
	edesc->pset[0].opt |= TCINTEN;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
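
/*
 * Illustrative client-side sketch (not part of this driver): a memcpy
 * user drives the transfer through the generic
 * dmaengine_prep_dma_memcpy() wrapper. Per the note in edma_dma_init()
 * below, the length must honor dma->copy_align.
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (txd) {
 *		cookie = dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */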
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n",
			__func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		/* The last slot is a loop-back copy of the first set */
		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
			"  cidx\t%08x\n"
			"  lkrld\t%08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].opt,
			edesc->pset[i].src,
			edesc->pset[i].dst,
			edesc->pset[i].a_b_cnt,
			edesc->pset[i].ccnt,
			edesc->pset[i].src_dst_bidx,
			edesc->pset[i].src_dst_cidx,
			edesc->pset[i].link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable interrupts for every period because the callback
		 * has to be called for every period.
		 */
		edesc->pset[i].opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
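
/*
 * Illustrative client-side sketch (not part of this driver): an audio
 * driver typically prepares one cyclic transfer covering the whole ring
 * buffer and gets a callback per period. All values are hypothetical.
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, ring_dma, 4 * 4096, 4096,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	txd->callback = period_elapsed;		// hypothetical callback
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */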
static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	unsigned long flags;
	struct edmacc_param p;

	edesc = echan->edesc;

	/* Pause the channel for non-cyclic */
	if (!edesc || (edesc && !edesc->cyclic))
		edma_pause(echan->ch_num);

	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		if (edesc) {
			if (edesc->cyclic) {
				vchan_cyclic_callback(&edesc->vdesc);
			} else if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
				edma_execute(echan);
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
				edma_execute(echan);
			}
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	case EDMA_DMA_CC_ERROR:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Issue later based on missed flag which will be sure
		 * to happen as:
		 * (1) we finished transmitting an intermediate slot and
		 *     edma_execute is coming up.
		 * (2) or we finished the current transfer and issue will
		 *     call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	default:
		break;
	}
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;
	LIST_HEAD(descs);

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					chan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_dbg(dev, "allocated channel for %u:%u\n",
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}
/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}
/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
static size_t edma_desc_size(struct edma_desc *edesc)
{
	int i;
	size_t size;

	/*
	 * a_b_cnt packs bcnt in the upper and acnt in the lower 16 bits,
	 * so each AB-synced pset moves acnt * bcnt * ccnt bytes.
	 */
	if (edesc->absync)
		for (size = i = 0; i < edesc->pset_nr; i++)
			size += (edesc->pset[i].a_b_cnt & 0xffff) *
				(edesc->pset[i].a_b_cnt >> 16) *
				 edesc->pset[i].ccnt;
	else
		size = (edesc->pset[0].a_b_cnt & 0xffff) *
			(edesc->pset[0].a_b_cnt >> 16) +
			(edesc->pset[0].a_b_cnt & 0xffff) *
			(SZ_64K - 1) * edesc->pset[0].ccnt;

	return size;
}
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	vdesc = vchan_find_desc(&echan->vchan, cookie);
	if (vdesc) {
		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		struct edma_desc *edesc = echan->edesc;
		txstate->residue = edma_desc_size(edesc);
	}
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}
#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int edma_dma_device_slave_caps(struct dma_chan *dchan,
				      struct dma_slave_caps *caps)
{
	caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;
	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	return 0;
}
static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->device_slave_caps = edma_dma_device_slave_caps;
	dma->dev = dev;

	/*
	 * code using dma memcpy must make sure alignment of
	 * length is at dma->copy_align boundary.
	 */
	dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;

	INIT_LIST_HEAD(&dma->channels);
}
static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return -EIO;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}
static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}
static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma-dma-engine",
		.owner	= THIS_MODULE,
	},
};
bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
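
/*
 * Illustrative client-side sketch (not part of this driver): board or
 * peripheral code requests a specific EDMA channel by passing its
 * channel number through edma_filter_fn. The request number below is
 * hypothetical.
 *
 *	dma_cap_mask_t mask;
 *	unsigned int req = EDMA_CTLR_CHAN(0, 12);	// hypothetical
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &req);
 */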
static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
	.dma_mask = DMA_BIT_MASK(32),
};
static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
	}

	if (EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
		}
	}

out:
	return ret;
}
subsys_initcall(edma_init);
static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);
MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");