/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20
/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200
/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 shadow regions */
#define EDMA_PARM	0x4000	/* PaRAM entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100	/* 64 registers */
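
/*
 * Worked example of the PaRAM addressing above: each PaRAM set is
 * PARM_SIZE (0x20) bytes, so PARM_OFFSET() only shifts the slot number
 * left by 5 and adds the EDMA_PARM base. Slot 3 therefore starts at
 * 0x4000 + (3 << 5) = 0x4060, and a given field sits at that base plus
 * its struct offset, e.g. CCNT at 0x4060 + PARM_CCNT = 0x407c.
 */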
/* CCCFG register */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)
/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
/* PaRAM slots are laid out like this */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __packed;
/* fields in edmacc_param.opt */
#define SAM		BIT(0)
#define DAM		BIT(1)
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)
struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of pending transfers (programmed to the EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: The amount of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: The residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: Tracks the length of the current intermediate transfer,
	 * this is required to update the residue during intermediate transfer
	 * completion callback.
	 */
	u32				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};
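
/*
 * Residue accounting, worked through with illustrative numbers: for a
 * 40-element SG list with 4 KiB per element and MAX_NR_SG = 20, prep
 * sets residue = residue_stat = 160 KiB. edma_execute() programs the
 * first batch of 20 psets (processed = 20, sg_len = 80 KiB); when that
 * batch completes, the completion handler does residue -= sg_len
 * (80 KiB left) and lets residue_stat/processed_stat catch up to
 * residue/processed before the next batch is issued.
 */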
struct edma_cc;

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};
struct edma_cc {
	struct device			*dev;
	struct edma_soc_info		*info;
	void __iomem			*base;
	int				id;

	/* eDMA3 resource information */
	unsigned			num_channels;
	unsigned			num_region;
	unsigned			num_slots;
	unsigned			num_tc;
	bool				chmap_exist;
	enum dma_event_q		default_queue;

	bool				unused_chan_list_done;
	/*
	 * The slot_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	unsigned long			*slot_inuse;

	/*
	 * The channel_unused bit for each channel is clear unless
	 * it is not being used on this platform. It uses a bit
	 * of SOC-specific initialization code.
	 */
	unsigned long			*channel_unused;

	struct dma_device		dma_slave;
	struct edma_chan		*slave_chans;
	int				dummy_slot;
};
/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};
static const struct of_device_id edma_of_ids[] = {
	{ .compatible = "ti,edma3", },
	{}
};
static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
	return (unsigned int)__raw_readl(ecc->base + offset);
}

static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
	__raw_writel(val, ecc->base + offset);
}

static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	val |= or;
	edma_write(ecc, offset, val);
}

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val |= or;
	edma_write(ecc, offset, val);
}

static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
					   int i)
{
	return edma_read(ecc, offset + (i << 2));
}

static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
				    unsigned val)
{
	edma_write(ecc, offset + (i << 2), val);
}

static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ecc, offset + (i << 2), and, or);
}

static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
				 unsigned or)
{
	edma_or(ecc, offset + (i << 2), or);
}

static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i,
				  int j, unsigned or)
{
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
				     int j, unsigned val)
{
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
						   int offset, int i)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
				      unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
					    int i, unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
					   int param_no)
{
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_param_write(struct edma_cc *ecc, int offset,
				    int param_no, unsigned val)
{
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_param_modify(struct edma_cc *ecc, int offset,
				     int param_no, unsigned and, unsigned or)
{
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_param_and(struct edma_cc *ecc, int offset,
				  int param_no, unsigned and)
{
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_param_or(struct edma_cc *ecc, int offset,
				 int param_no, unsigned or)
{
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}

static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}
static void edma_map_dmach_to_queue(struct edma_chan *echan,
				    enum dma_event_q queue_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int bit = (channel & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = ecc->default_queue;

	queue_no &= 7;
	edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
			  queue_no << bit);
}
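
/*
 * Worked example of the DMAQNUM layout handled above: each DMAQNUM
 * register packs eight channels at four bits per channel. For channel
 * 11, bit = (11 & 0x7) * 4 = 12 and the register index is 11 >> 3 = 1,
 * so the queue number lands in bits 12-15 of DMAQNUM1.
 */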
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
					  int priority)
{
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}
static void edma_set_chmap(struct edma_chan *echan, int slot)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (ecc->chmap_exist) {
		slot = EDMA_CHAN_SLOT(slot);
		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
	}
}
static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct edma_cc *ecc = data;
	int dma_req_min = EDMA_CTLR_CHAN(ecc->id, 0);
	int dma_req_max = dma_req_min + ecc->num_channels;
	int i, count;
	struct of_phandle_args dma_spec;

	if (dev->of_node) {
		struct platform_device *dma_pdev;

		count = of_property_count_strings(dev->of_node, "dma-names");
		if (count < 0)
			return 0;

		for (i = 0; i < count; i++) {
			if (of_parse_phandle_with_args(dev->of_node, "dmas",
						       "#dma-cells", i,
						       &dma_spec))
				continue;

			if (!of_match_node(edma_of_ids, dma_spec.np)) {
				of_node_put(dma_spec.np);
				continue;
			}

			dma_pdev = of_find_device_by_node(dma_spec.np);
			if (&dma_pdev->dev != ecc->dev)
				continue;

			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
				  ecc->channel_unused);
			of_node_put(dma_spec.np);
		}
		return 0;
	}

	/* For non-OF case */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *res = &pdev->resource[i];
		int dma_req;

		if (!(res->flags & IORESOURCE_DMA))
			continue;

		dma_req = (int)res->start;
		if (dma_req >= dma_req_min && dma_req < dma_req_max)
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
				  ecc->channel_unused);
	}

	return 0;
}
static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (enable) {
		edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
					 BIT(channel & 0x1f));
		edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
					 BIT(channel & 0x1f));
	} else {
		edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
					 BIT(channel & 0x1f));
	}
}
/*
 * paRAM slot management functions
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}

static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
			   struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
}
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer. Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
	if (slot >= 0) {
		slot = EDMA_CHAN_SLOT(slot);
		/* Requesting entry paRAM slot for a HW triggered channel. */
		if (ecc->chmap_exist && slot < ecc->num_channels)
			slot = EDMA_SLOT_ANY;
	}

	if (slot < 0) {
		if (ecc->chmap_exist)
			slot = 0;
		else
			slot = ecc->num_channels;
		for (;;) {
			slot = find_next_zero_bit(ecc->slot_inuse,
						  ecc->num_slots,
						  slot);
			if (slot == ecc->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, ecc->slot_inuse))
				break;
		}
	} else if (slot >= ecc->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {
		return -EBUSY;
	}

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
}
static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->slot_inuse);
}
/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)
		return;

	edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
			  PARM_OFFSET(to));
}
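
/*
 * Worked example for edma_link(): the low 16 bits of LINK_BCNTRLD hold
 * the PaRAM address of the next set, so linking to slot 5 writes
 * PARM_OFFSET(5) = 0x40a0 into them while the 0xffff0000 mask preserves
 * the BCNTRLD reload value in the upper half.
 */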
/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
				    bool dst)
{
	u32 offs;

	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
}
/*
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software. (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 */
static void edma_start(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	if (test_bit(channel, ecc->channel_unused)) {
		/* EDMA channels without event association */
		dev_dbg(ecc->dev, "ESR%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ESR, j));
		edma_shadow0_write_array(ecc, SH_ESR, j, mask);
	} else {
		/* EDMA channel with event association */
		dev_dbg(ecc->dev, "ER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
		dev_dbg(ecc->dev, "EER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_EER, j));
	}
}
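
/*
 * The j/mask split used by edma_start() and the helpers below, worked
 * through: the event registers are 64 bits wide and accessed as two
 * 32-bit words, so channel 37 selects word j = 37 >> 5 = 1 and
 * mask = BIT(37 & 0x1f) = BIT(5) within that word.
 */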
static void edma_stop(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_EECR, j, mask);
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write_array(ecc, EDMA_EMCR, j, mask);

	/* clear possibly pending completion interrupt */
	edma_shadow0_write_array(ecc, SH_ICR, j, mask);

	dev_dbg(ecc->dev, "EER%d %08x\n", j,
		edma_shadow0_read_array(ecc, SH_EER, j));

	/* REVISIT: consider guarding against inappropriate event
	 * chaining by overwriting with dummy_paramset.
	 */
}
/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
}
/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
}
static void edma_trigger_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);

	dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
		edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
}
static void edma_clean_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	/* Clear the corresponding EMR bits */
	edma_write_array(ecc, EDMA_EMCR, j, mask);
	/* Clear any SER */
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}
static int edma_alloc_channel(struct edma_chan *echan,
			      enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (!ecc->unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		int ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
					   prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		ecc->unused_chan_list_done = true;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(echan);

	edma_setup_interrupt(echan, true);

	edma_map_dmach_to_queue(echan, eventq_no);

	return 0;
}
static void edma_free_channel(struct edma_chan *echan)
{
	/* ensure no events are pending */
	edma_stop(echan);
	/* REVISIT should probably take out of shadow region 0 */
	edma_setup_interrupt(echan, false);
}
/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_chan *echan,
				       enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)
		return;

	edma_map_dmach_to_queue(echan, eventq_no);
}
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions, set up
	 * a link to the dummy slot; this results in all future events being
	 * absorbed, and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of MAX_NR_SG
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan);
	}
}
static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_dma() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan);
		/* Move the cyclic channel back to default queue */
		if (echan->edesc->cyclic)
			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}
static int edma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan);
	return 0;
}
static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan);
	return 0;
}
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel who's PaRAM set we're configuring
 * @pset: PaRAM set to initialize and setup.
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @acnt: How much is the dev_width
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    unsigned int acnt, unsigned int dma_length,
			    enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of:
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: In A-sync transfer only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the best way adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K-1. This
		 * is assured as bcntrld = 0xffff at the end of this function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * Only time when (bcntrld) auto reload is required is for
	 * A-sync case, and in this case, a requirement of reload value
	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
	 * and then later will be populated by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}
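
/*
 * A worked example of the A-sync split above (illustrative numbers):
 * with acnt = 1 and dma_length = 70000, dma_length / acnt = 70000
 * elements, so ccnt = 70000 / 65535 = 1 and bcnt = 70000 - 65535 = 4465.
 * bcnt is non-zero, so ccnt becomes 2: the first frame moves 4465 bytes
 * and BCNTRLD then reloads bcnt to SZ_64K - 1 for the remaining frame.
 */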
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/* If this is the last in a current SG set of transactions,
		   enable interrupts so that next set is processed */
		if (!((i + 1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret, nslots;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned int width, pset_len;

	if (unlikely(!echan || !len))
		return NULL;

	if (len < SZ_64K) {
		/*
		 * Transfer size less than 64K can be handled with one paRAM
		 * slot and with one burst.
		 * ACNT = length
		 */
		width = len;
		pset_len = len;
		nslots = 1;
	} else {
		/*
		 * Transfer size bigger than 64K will be handled with maximum
		 * of two paRAM slots.
		 * slot1: (full_length / 32767) times 32767 bytes bursts.
		 *	  ACNT = 32767, length1: (full_length / 32767) * 32767
		 * slot2: the remaining amount of data after slot1.
		 *	  ACNT = full_length - length1, length2 = ACNT
		 *
		 * When the full_length is a multiple of 32767 one slot can
		 * be used to complete the transfer.
		 */
		width = SZ_32K - 1;
		pset_len = rounddown(len, width);
		/* One slot is enough for lengths multiple of (SZ_32K - 1) */
		if (unlikely(pset_len == len))
			nslots = 1;
		else
			nslots = 2;
	}

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = len;
	edesc->direction = DMA_MEM_TO_MEM;
	edesc->echan = echan;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       width, pset_len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	edesc->pset[0].param.opt |= ITCCHEN;
	if (nslots == 1) {
		/* Enable transfer complete interrupt */
		edesc->pset[0].param.opt |= TCINTEN;
	} else {
		/* Enable transfer complete chaining for the first slot */
		edesc->pset[0].param.opt |= TCCHEN;

		if (echan->slot[1] < 0) {
			echan->slot[1] = edma_alloc_slot(echan->ecc,
							 EDMA_SLOT_ANY);
			if (echan->slot[1] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
		dest += pset_len;
		src += pset_len;
		pset_len = width = len % (SZ_32K - 1);

		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
				       width, pset_len, DMA_MEM_TO_MEM);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->pset[1].param.opt |= ITCCHEN;
		edesc->pset[1].param.opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
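
/*
 * Worked example of the two-slot memcpy split above (illustrative
 * numbers): len = 100000 gives width = SZ_32K - 1 = 32767 and
 * pset_len = rounddown(100000, 32767) = 98301, so slot 0 moves three
 * 32767-byte bursts and slot 1, chained in via TCCHEN, moves the
 * remaining 100000 % 32767 = 1699 bytes.
 */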
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Period should be multiple of Buffer length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 i, echan->ch_num, echan->slot[i],
			 edesc->pset[i].param.opt,
			 edesc->pset[i].param.src,
			 edesc->pset[i].param.dst,
			 edesc->pset[i].param.a_b_cnt,
			 edesc->pset[i].param.ccnt,
			 edesc->pset[i].param.src_dst_bidx,
			 edesc->pset[i].param.src_dst_cidx,
			 edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[i].param.opt |= TCINTEN;
	}

	/* Place the cyclic channel to highest priority queue */
	edma_assign_channel_eventq(echan, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
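
/*
 * Note on the slot count above: a cyclic buffer of e.g. 4 periods uses
 * nslots = 4 + 1 = 5. The extra slot is a copy of pset[0] which
 * edma_execute() links back to echan->slot[1], closing the ring so the
 * transfer wraps without CPU intervention.
 */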
static void edma_completion_handler(struct edma_chan *echan)
{
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc = echan->edesc;

	if (!edesc)
		return;

	spin_lock(&echan->vchan.lock);
	if (edesc->cyclic) {
		vchan_cyclic_callback(&edesc->vdesc);
		spin_unlock(&echan->vchan.lock);
		return;
	} else if (edesc->processed == edesc->pset_nr) {
		edesc->residue = 0;
		edma_stop(echan);
		vchan_cookie_complete(&edesc->vdesc);
		echan->edesc = NULL;

		dev_dbg(dev, "Transfer completed on channel %d\n",
			echan->ch_num);
	} else {
		dev_dbg(dev, "Sub transfer completed on channel %d\n",
			echan->ch_num);

		edma_pause(echan);

		/* Update statistics for tx_status */
		edesc->residue -= edesc->sg_len;
		edesc->residue_stat = edesc->residue;
		edesc->processed_stat = edesc->processed;
	}

	edma_execute(echan);

	spin_unlock(&echan->vchan.lock);
}
/* eDMA interrupt handler */
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);
		}
	} while (sh_ipr);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
static void edma_error_handler(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edmacc_param p;

	if (!echan->edesc)
		return;

	spin_lock(&echan->vchan.lock);

	edma_read_slot(ecc, echan->slot[0], &p);
	/*
	 * Issue later based on missed flag which will be sure
	 * to happen as:
	 * (1) we finished transmitting an intermediate slot and
	 *     edma_execute is coming up.
	 * (2) or we finished current transfer and issue will
	 *     call edma_execute.
	 *
	 * Important note: issuing can be dangerous here and
	 * lead to some nasty recursion when we are in a NULL
	 * slot. So we avoid doing so and set the missed flag.
	 */
	if (p.a_b_cnt == 0 && p.ccnt == 0) {
		dev_dbg(dev, "Error on null slot, setting miss\n");
		echan->missed = 1;
	} else {
		/*
		 * The slot is already programmed but the event got
		 * missed, so it's safe to issue it here.
		 */
		dev_dbg(dev, "Missed event, TRIGGERING\n");
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
	}
	spin_unlock(&echan->vchan.lock);
}
static inline bool edma_error_pending(struct edma_cc *ecc)
{
	if (edma_read_array(ecc, EDMA_EMR, 0) ||
	    edma_read_array(ecc, EDMA_EMR, 1) ||
	    edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
		return true;

	return false;
}
/* eDMA error interrupt handler */
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int i, j;
	int ctlr;
	unsigned int cnt = 0;
	unsigned int val;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");

	if (!edma_error_pending(ecc))
		return IRQ_NONE;

	while (1) {
		/* Event missed register(s) */
		for (j = 0; j < 2; j++) {
			unsigned long emr;

			val = edma_read_array(ecc, EDMA_EMR, j);
			if (!val)
				continue;

			dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
			emr = val;
			for (i = find_next_bit(&emr, 32, 0); i < 32;
			     i = find_next_bit(&emr, 32, i + 1)) {
				int k = (j << 5) + i;

				/* Clear the corresponding EMR bits */
				edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
				/* Clear any SER */
				edma_shadow0_write_array(ecc, SH_SECR, j,
							 BIT(i));
				edma_error_handler(&ecc->slave_chans[k]);
			}
		}

		val = edma_read(ecc, EDMA_QEMR);
		if (val) {
			dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_QEMCR, val);
			edma_shadow0_write(ecc, SH_QSECR, val);
		}

		val = edma_read(ecc, EDMA_CCERR);
		if (val) {
			dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_CCERRCLR, val);
		}

		if (!edma_error_pending(ecc))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ecc, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;

	ret = edma_alloc_channel(echan, EVENTQ_DEFAULT);
	if (ret)
		return ret;

	echan->slot[0] = edma_alloc_slot(echan->ecc, echan->ch_num);
	if (echan->slot[0] < 0) {
		dev_err(dev, "Entry slot allocation failed for channel %u\n",
			EDMA_CHAN_SLOT(echan->ch_num));
		goto err_slot;
	}

	/* Set up channel -> slot mapping for the entry slot */
	edma_set_chmap(echan, echan->slot[0]);
	echan->alloced = true;

	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_slot:
	edma_free_channel(echan);
	return echan->slot[0];
}
/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	int i;

	/* Terminate transfers */
	edma_stop(echan);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 0; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->ecc, echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Set entry slot to the dummy slot */
	edma_set_chmap(echan, echan->ecc->dummy_slot);

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan);
		echan->alloced = false;
	}

	dev_dbg(chan->device->dev, "freeing channel for %u\n", echan->ch_num);
}
/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	int i;

	/*
	 * We always read the dst/src position from the first RamPar
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}
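
/*
 * Example of the pset walk above: with psets of 4 KiB at 0x1000 and
 * 0x2000, a position read of 0x2800 is outside the first pset, so it is
 * marked done (processed_stat++, residue_stat -= 4096); it falls inside
 * the second, so the reported residue is
 * residue_stat - (0x2800 - 0x2000), i.e. 2 KiB into that pset.
 */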
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
static void __init edma_chan_init(struct edma_cc *ecc, struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}
#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_config = edma_slave_config;
	dma->device_pause = edma_dma_pause;
	dma->device_resume = edma_dma_resume;
	dma->device_terminate_all = edma_terminate_all;

	dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");

	/* Nothing needs to be done if queue priority is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}
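
/*
 * Worked CCCFG decode (illustrative register value): cccfg = 0x00215544
 * gives GET_NUM_DMACH = 4 -> BIT(4 + 1) = 32 channels,
 * GET_NUM_PAENTRY = 5 -> BIT(5 + 4) = 512 slots, GET_NUM_EVQUE = 1 ->
 * 2 TCs/queues, and GET_NUM_REGN = 2 -> BIT(2) = 4 shadow regions.
 */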
#if IS_ENABLED(CONFIG_OF)
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
			       int sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(dev->of_node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
					 nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;

	return 0;
}
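
/*
 * Worked example of the crossbar write-back above: for a mapping pair
 * { 12, 9 } the second element selects the mux byte, offset =
 * 9 & ~0x03 = 8 and shift = (9 & 0x03) << 3 = 8, so the first element
 * (12) is written into bits 8-15 of the register at xbar + 8.
 */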
static int edma_of_parse_dt(struct device *dev, struct edma_soc_info *pdata)
{
	int ret = 0;
	int sz;
	struct property *prop;
	struct edma_rsv_info *rsv_info;

	rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
	if (!rsv_info)
		return -ENOMEM;
	pdata->rsv = rsv_info;

	prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", &sz);
	if (prop)
		ret = edma_xbar_event_map(dev, pdata, sz);

	return ret;
}

static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
{
	struct edma_soc_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = edma_of_parse_dt(dev, info);
	if (ret)
		return ERR_PTR(ret);

	return info;
}
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif
static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info *info = pdev->dev.platform_data;
	s8 (*queue_priority_mapping)[2];
	int i, off, ln;
	const s16 (*rsv_chans)[2];
	const s16 (*rsv_slots)[2];
	const s16 (*xbar_chans)[2];
	int irq;
	char *irq_name;
	struct resource *mem;
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc;
	int ret;

	if (node) {
		info = edma_setup_info_from_dt(dev);
		if (IS_ERR(info)) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);
		}
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->dev = dev;
	ecc->id = pdev->id;
	/* When booting with DT the pdev->id is -1 */
	if (ecc->id < 0)
		ecc->id = 0;

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
	if (!mem) {
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem) {
			dev_err(dev, "no mem resource?\n");
			return -ENODEV;
		}
	}
	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);
	if (ret)
		return ret;

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);
	if (!ecc->slave_chans)
		return -ENOMEM;

	ecc->channel_unused = devm_kcalloc(dev,
					   BITS_TO_LONGS(ecc->num_channels),
					   sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->channel_unused)
		return -ENOMEM;

	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->slot_inuse)
		return -ENOMEM;

	ecc->default_queue = info->default_queue;

	for (i = 0; i < ecc->num_slots; i++)
		edma_write_slot(ecc, i, &dummy_paramset);

	/* Mark all channels as unused */
	memset(ecc->channel_unused, 0xff,
	       BITS_TO_LONGS(ecc->num_channels) * sizeof(unsigned long));

	if (info->rsv) {
		/* Clear the reserved channels in unused list */
		rsv_chans = info->rsv->rsv_chans;
		if (rsv_chans) {
			for (i = 0; rsv_chans[i][0] != -1; i++) {
				off = rsv_chans[i][0];
				ln = rsv_chans[i][1];
				clear_bits(off, ln, ecc->channel_unused);
			}
		}

		/* Set the reserved slots in inuse list */
		rsv_slots = info->rsv->rsv_slots;
		if (rsv_slots) {
			for (i = 0; rsv_slots[i][0] != -1; i++) {
				off = rsv_slots[i][0];
				ln = rsv_slots[i][1];
				set_bits(off, ln, ecc->slot_inuse);
			}
		}
	}

	/* Clear the xbar mapped channels in unused list */
	xbar_chans = info->xbar_chans;
	if (xbar_chans) {
		for (i = 0; xbar_chans[i][1] != -1; i++) {
			off = xbar_chans[i][1];
			clear_bits(off, 1, ecc->channel_unused);
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
	}

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	queue_priority_mapping = info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_region; i++) {
		edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
		edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
		edma_write_array(ecc, EDMA_QRAE, i, 0x0);
	}
	ecc->info = info;

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	for (i = 0; i < ecc->num_channels; i++) {
		/* Assign all channels to the default queue */
		edma_map_dmach_to_queue(&ecc->slave_chans[i],
					info->default_queue);
		/* Set entry slot to the dummy slot */
		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
	}

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	if (node)
		of_dma_controller_register(node, of_dma_xlate_by_chan_id,
					   &ecc->dma_slave);

	dev_info(dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc, ecc->dummy_slot);
	return ret;
}
static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc, ecc->dummy_slot);

	return 0;
}
2194 static int edma_pm_resume(struct device
*dev
)
2196 struct edma_cc
*ecc
= dev_get_drvdata(dev
);
2197 struct edma_chan
*echan
= ecc
->slave_chans
;
2199 s8 (*queue_priority_mapping
)[2];
2201 queue_priority_mapping
= ecc
->info
->queue_priority_mapping
;
2203 /* Event queue priority mapping */
2204 for (i
= 0; queue_priority_mapping
[i
][0] != -1; i
++)
2205 edma_assign_priority_to_queue(ecc
, queue_priority_mapping
[i
][0],
2206 queue_priority_mapping
[i
][1]);
2208 for (i
= 0; i
< ecc
->num_channels
; i
++) {
2209 if (echan
[i
].alloced
) {
2210 /* ensure access through shadow region 0 */
2211 edma_or_array2(ecc
, EDMA_DRAE
, 0, i
>> 5,
2214 edma_setup_interrupt(&echan
[i
], true);
2216 /* Set up channel -> slot mapping for the entry slot */
2217 edma_set_chmap(&echan
[i
], echan
[i
].slot
[0]);
2225 static const struct dev_pm_ops edma_pm_ops
= {
2226 SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL
, edma_pm_resume
)
static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
};
bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
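
/*
 * Illustrative (hypothetical) use of edma_filter_fn from a client
 * driver, requesting the channel wired to a known eDMA event number:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int req = 12;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &req);
 *
 * dma_request_channel() offers each candidate channel to the filter,
 * and edma_filter_fn() accepts only an eDMA channel whose ch_num
 * matches the requested number.
 */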
static int edma_init(void)
{
	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);
MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");