/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16
struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 *   so far to cover accounting. This is updated to match processed
	 *   during edma_callback and is always <= processed, because
	 *   processed refers to the number of transfers pending (programmed
	 *   to the EDMA controller), whereas processed_stat tracks the
	 *   number of transfers accounted for so far.
	 *
	 * - residue: the number of bytes we have left to transfer for this
	 *   descriptor.
	 *
	 * - residue_stat: the residue in bytes of data we have covered so
	 *   far for accounting. This is updated to match residue during
	 *   callbacks to keep it current.
	 *
	 * - sg_len: tracks the length of the current intermediate transfer;
	 *   this is required to update the residue in the intermediate
	 *   transfer completion callback.
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};
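/*
 * Worked example of the accounting above (illustrative numbers, not from
 * the source): for a 25-element SG list of 4 KiB each with MAX_NR_SG = 20,
 * the descriptor starts with residue = residue_stat = 102400 and
 * processed = processed_stat = 0. edma_execute() below programs the first
 * 20 elements (processed = 20, sg_len = 81920); at the intermediate
 * completion, edma_callback() does residue -= sg_len (leaving 20480) and
 * copies processed into processed_stat, so the *_stat fields catch up.
 */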
struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	int				ctlr;
	struct dma_device		dma_slave;
	struct edma_chan		slave_chans[EDMA_CHANS];
	int				dummy_slot;
};
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
	*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(echan->vchan.chan.device->dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions, set up
	 * a link to the dummy slot; this results in all future events being
	 * absorbed and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of MAX_NR_SG
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan->ch_num);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan->ch_num);
	}
}
static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_dma() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan->ch_num);
		/* Move the cyclic channel back to default queue */
		if (echan->edesc->cyclic)
			edma_assign_channel_eventq(echan->ch_num,
						   EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}
static int edma_slave_config(struct dma_chan *chan,
	struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
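/*
 * Minimal client-side sketch (not part of this driver) of how a dmaengine
 * consumer reaches edma_slave_config() through the framework; the FIFO
 * address and widths below are hypothetical.
 */
static int __maybe_unused edma_example_slave_config(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= 0x01d0c000,	/* hypothetical device FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	/* Calls dma->device_config, i.e. edma_slave_config() above */
	return dmaengine_slave_config(chan, &cfg);
}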
static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan->ch_num);

	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan->ch_num);

	return 0;
}
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @epset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: Width of the device data, used as the A count
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
	enum dma_slave_buswidth dev_width, unsigned int dma_length,
	enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of:
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: In A-sync transfers only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the approach adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K - 1. This
		 * is assured since bcntrld is set to 0xffff at the end of
		 * this function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K - 1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = src_addr;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * The only time the (bcntrld) auto reload is required is for the
	 * A-sync case, and in this case, a reload value of SZ_64K - 1 is
	 * the only one needed. 'link' is initially set to NULL
	 * and will later be populated by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}
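/*
 * Worked example for the A-sync math above (hypothetical numbers):
 * dev_width = 4 bytes (acnt = 4), burst = 1, dma_length = 1 MiB.
 * Then dma_length / acnt = 262144 elements, so
 * ccnt = 262144 / 65535 = 4 and bcnt = 262144 - 4 * 65535 = 4;
 * since bcnt != 0, ccnt is bumped to 5: one first frame of 4 elements
 * followed by four frames of SZ_64K - 1 elements, courtesy of the
 * 0xffff bcnt reload programmed in link_bcntrld.
 */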
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/*
		 * If this is the last in the current SG set of transactions,
		 * enable interrupts so that the next set is processed.
		 */
		if (!((i + 1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
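/*
 * Minimal client-side sketch (not part of this driver) showing how the
 * slave-SG path above is reached; the scatterlist is assumed to be
 * already mapped with dma_map_sg() by the caller.
 */
static struct dma_async_tx_descriptor *__maybe_unused
edma_example_prep_sg(struct dma_chan *chan, struct scatterlist *sgl,
		     unsigned int sg_len)
{
	/* Resolves to edma_prep_slave_sg() via chan->device */
	return dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}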
static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);

	if (unlikely(!echan || !len))
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = 1;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	/*
	 * Enable intermediate transfer chaining to re-trigger channel
	 * on completion of every TR, and enable transfer-completion
	 * interrupt on completion of the whole transfer.
	 */
	edesc->pset[0].param.opt |= ITCCHEN;
	edesc->pset[0].param.opt |= TCINTEN;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
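/*
 * Minimal client-side sketch (not part of this driver): a memcpy request
 * for a buffer pair already mapped to DMA addresses. The length must
 * satisfy dma->copy_align (4 bytes, set below in edma_dma_init()).
 */
static struct dma_async_tx_descriptor *__maybe_unused
edma_example_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		    size_t len)
{
	/* Resolves to edma_prep_dma_memcpy() via chan->device */
	return dmaengine_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}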
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		/* The last PaRAM set is a dummy cloned from the first */
		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 i, echan->ch_num, echan->slot[i],
			 edesc->pset[i].param.opt,
			 edesc->pset[i].param.src,
			 edesc->pset[i].param.dst,
			 edesc->pset[i].param.a_b_cnt,
			 edesc->pset[i].param.ccnt,
			 edesc->pset[i].param.src_dst_bidx,
			 edesc->pset[i].param.src_dst_cidx,
			 edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[i].param.opt |= TCINTEN;
	}

	/* Place the cyclic channel to highest priority queue */
	edma_assign_channel_eventq(echan->ch_num, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
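/*
 * Minimal client-side sketch (not part of this driver), e.g. for audio:
 * an 8 KiB ring of four 2 KiB periods becomes nslots = 4 + 1 = 5 PaRAM
 * sets inside edma_prep_dma_cyclic() (the extra slot is the dummy clone
 * of pset[0]), which is comfortably within MAX_NR_SG.
 */
static struct dma_async_tx_descriptor *__maybe_unused
edma_example_cyclic(struct dma_chan *chan, dma_addr_t buf)
{
	return dmaengine_prep_dma_cyclic(chan, buf, 8192, 2048,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
}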
static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	struct edmacc_param p;

	edesc = echan->edesc;

	spin_lock(&echan->vchan.lock);
	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		if (edesc) {
			if (edesc->cyclic) {
				vchan_cyclic_callback(&edesc->vdesc);
			} else if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edesc->residue = 0;
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
				edma_execute(echan);
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);

				edma_pause(echan->ch_num);

				/* Update statistics for tx_status */
				edesc->residue -= edesc->sg_len;
				edesc->residue_stat = edesc->residue;
				edesc->processed_stat = edesc->processed;

				edma_execute(echan);
			}
		}
		break;
	case EDMA_DMA_CC_ERROR:
		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Issue later based on missed flag which will be sure
		 * to happen as:
		 * (1) we finished transmitting an intermediate slot and
		 *     edma_execute is coming up.
		 * (2) or we finished current transfer and issue will
		 *     call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}
		break;
	default:
		break;
	}
	spin_unlock(&echan->vchan.lock);
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
				      echan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}
/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}
/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	int i;

	/*
	 * We always read the dst/src position from the first PaRAM
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(edesc->echan->slot[0], dst);

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}
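/*
 * Worked example for the SG walk above (illustrative numbers): with
 * three 4 KiB psets all programmed and pos 1 KiB into pset[1], the loop
 * marks pset[0] done (residue_stat drops from 12288 to 8192) and then
 * returns residue_stat - (pos - pset[1].addr), i.e. 8192 - 1024 = 7168
 * bytes still outstanding.
 */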
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}
#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_config = edma_slave_config;
	dma->device_pause = edma_dma_pause;
	dma->device_resume = edma_dma_resume;
	dma->device_terminate_all = edma_terminate_all;

	dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dma->dev = dev;

	/*
	 * Code using dma memcpy must make sure the alignment of
	 * length is at a dma->copy_align boundary.
	 */
	dma->copy_align = DMAENGINE_ALIGN_4_BYTES;

	INIT_LIST_HEAD(&dma->channels);
}
static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}
static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}
static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma-dma-engine",
	},
};
bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
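/*
 * Minimal sketch (not part of this driver) of a client requesting a
 * specific EDMA channel through the filter above; controller 0 and
 * channel 12 are hypothetical.
 */
static struct dma_chan *__maybe_unused edma_example_request_chan(void)
{
	dma_cap_mask_t mask;
	unsigned int ch_num = EDMA_CTLR_CHAN(0, 12);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* edma_filter_fn() runs synchronously, so &ch_num may be a local */
	return dma_request_channel(mask, edma_filter_fn, &ch_num);
}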
static int edma_init(void)
{
	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");