/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20
/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QSECR	0x94
/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY 0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 shadow regions */
#define EDMA_PARM	0x4000	/* PaRAM entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
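/*
 * Worked example (illustrative): each PaRAM slot is 32 bytes, so
 * PARM_OFFSET(5) = 0x4000 + (5 << 5) = 0x40a0, and a field such as the
 * OPT word of slot 5 is then addressed at PARM_OFFSET(5) + PARM_OPT.
 */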
#define EDMA_DCHMAP	0x0100	/* 64 registers */
/* CCCFG register */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)
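/*
 * Worked example (illustrative, hypothetical register value): for
 * cccfg = 0x1324005, GET_NUM_DMACH() = 5 -> 64 DMA channels,
 * GET_NUM_PAENTRY() = 4 -> 256 PaRAM slots, GET_NUM_EVQUE() = 2 -> 3
 * event queues/TCs, GET_NUM_REGN() = 3 -> 8 shadow regions, and
 * CHMAP_EXIST is set (see edma_setup_from_hw() for the exact decode).
 */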
/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
/* PaRAM slots are laid out like this */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __packed;

/* fields in edmacc_param.opt */
#define SAM		BIT(0)
#define DAM		BIT(1)
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)
struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of pending transfers (programmed to the EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: the number of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: the residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: tracks the length of the current intermediate transfer,
	 * this is required to update the residue during intermediate transfer
	 * completion callback.
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};
struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};
struct edma_cc {
	struct device			*dev;
	struct edma_soc_info		*info;
	void __iomem			*base;
	int				id;

	/* eDMA3 resource information */
	unsigned			num_channels;
	unsigned			num_region;
	unsigned			num_slots;
	unsigned			num_tc;
	bool				chmap_exist;
	enum dma_event_q		default_queue;

	bool				unused_chan_list_done;
	/* The slot_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	unsigned long			*slot_inuse;

	/* The channel_unused bit for each channel is clear unless the
	 * channel is not used on this platform; it is set up by a bit
	 * of SoC-specific initialization code.
	 */
	unsigned long			*channel_unused;

	struct dma_device		dma_slave;
	struct edma_chan		*slave_chans;
	int				dummy_slot;
};
/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};
static const struct of_device_id edma_of_ids[] = {
	{ .compatible = "ti,edma3", },
	{}
};
static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
	return (unsigned int)__raw_readl(ecc->base + offset);
}
static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
	__raw_writel(val, ecc->base + offset);
}
static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	val |= or;
	edma_write(ecc, offset, val);
}
static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}
static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val |= or;
	edma_write(ecc, offset, val);
}
static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
					   int i)
{
	return edma_read(ecc, offset + (i << 2));
}
static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
				    unsigned val)
{
	edma_write(ecc, offset + (i << 2), val);
}
static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ecc, offset + (i << 2), and, or);
}
static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
				 unsigned or)
{
	edma_or(ecc, offset + (i << 2), or);
}
static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
				  unsigned or)
{
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}
static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
				     int j, unsigned val)
{
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}
static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset);
}
static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
						   int offset, int i)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}
static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
				      unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset, val);
}
static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
					    int i, unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}
static inline unsigned int edma_parm_read(struct edma_cc *ecc, int offset,
					  int param_no)
{
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}
static inline void edma_parm_write(struct edma_cc *ecc, int offset,
				   int param_no, unsigned val)
{
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}
static inline void edma_parm_modify(struct edma_cc *ecc, int offset,
				    int param_no, unsigned and, unsigned or)
{
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}
static inline void edma_parm_and(struct edma_cc *ecc, int offset, int param_no,
				 unsigned and)
{
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}
static inline void edma_parm_or(struct edma_cc *ecc, int offset, int param_no,
				unsigned or)
{
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}
static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}
static void edma_map_dmach_to_queue(struct edma_cc *ecc, unsigned ch_no,
				    enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = ecc->default_queue;

	queue_no &= 7;
	edma_modify_array(ecc, EDMA_DMAQNUM, (ch_no >> 3), ~(0x7 << bit),
			  queue_no << bit);
}
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
					  int priority)
{
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}
static void edma_set_chmap(struct edma_cc *ecc, int channel, int slot)
{
	if (ecc->chmap_exist) {
		channel = EDMA_CHAN_SLOT(channel);
		slot = EDMA_CHAN_SLOT(slot);
		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
	}
}
static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct edma_cc *ecc = data;
	int dma_req_min = EDMA_CTLR_CHAN(ecc->id, 0);
	int dma_req_max = dma_req_min + ecc->num_channels;
	int i, count;
	struct of_phandle_args dma_spec;

	if (dev->of_node) {
		struct platform_device *dma_pdev;

		count = of_property_count_strings(dev->of_node, "dma-names");
		if (count < 0)
			return 0;

		for (i = 0; i < count; i++) {
			if (of_parse_phandle_with_args(dev->of_node, "dmas",
						       "#dma-cells", i,
						       &dma_spec))
				continue;

			if (!of_match_node(edma_of_ids, dma_spec.np)) {
				of_node_put(dma_spec.np);
				continue;
			}

			dma_pdev = of_find_device_by_node(dma_spec.np);
			if (&dma_pdev->dev != ecc->dev)
				continue;

			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
				  ecc->channel_unused);
			of_node_put(dma_spec.np);
		}
		return 0;
	}

	/* For non-OF case */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *res = &pdev->resource[i];
		int dma_req;

		if (!(res->flags & IORESOURCE_DMA))
			continue;

		dma_req = (int)res->start;
		if (dma_req >= dma_req_min && dma_req < dma_req_max)
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
				  ecc->channel_unused);
	}

	return 0;
}
static void edma_setup_interrupt(struct edma_cc *ecc, unsigned lch, bool enable)
{
	lch = EDMA_CHAN_SLOT(lch);

	if (enable) {
		edma_shadow0_write_array(ecc, SH_ICR, lch >> 5,
					 BIT(lch & 0x1f));
		edma_shadow0_write_array(ecc, SH_IESR, lch >> 5,
					 BIT(lch & 0x1f));
	} else {
		edma_shadow0_write_array(ecc, SH_IECR, lch >> 5,
					 BIT(lch & 0x1f));
	}
}
/*
 * paRAM slot management functions
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}
static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
			   struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
}
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer. Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
	if (slot > 0) {
		slot = EDMA_CHAN_SLOT(slot);
		/* Requesting entry paRAM slot for a HW triggered channel. */
		if (ecc->chmap_exist && slot < ecc->num_channels)
			slot = EDMA_SLOT_ANY;
	}

	if (slot < 0) {
		if (ecc->chmap_exist)
			slot = 0;
		else
			slot = ecc->num_channels;
		for (;;) {
			slot = find_next_zero_bit(ecc->slot_inuse,
						  ecc->num_slots, slot);
			if (slot == ecc->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, ecc->slot_inuse))
				break;
		}
	} else if (slot >= ecc->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {
		return -EBUSY;
	}

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
}
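/*
 * Usage sketch (illustrative; 'ecc' and 'echan' as used elsewhere in this
 * file): callers typically grab an anonymous slot and link it behind a
 * channel's entry slot:
 *
 *	int slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
 *
 *	if (slot >= 0)
 *		edma_link(ecc, echan->slot[0], slot);
 */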
static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->slot_inuse);
}
/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)
		return;

	edma_parm_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
			 PARM_OFFSET(to));
}
/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
				    bool dst)
{
	u32 offs;

	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
}
/*-----------------------------------------------------------------------*/

/**
 * edma_start - start dma on a channel
 * @ecc: pointer to edma_cc struct
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software. (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
static int edma_start(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return -EINVAL;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, ecc->channel_unused)) {
			dev_dbg(ecc->dev, "ESR%d %08x\n", j,
				edma_shadow0_read_array(ecc, SH_ESR, j));
			edma_shadow0_write_array(ecc, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		dev_dbg(ecc->dev, "ER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
		dev_dbg(ecc->dev, "EER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
/**
 * edma_stop - stops dma on the channel passed
 * @ecc: pointer to edma_cc struct
 * @channel: channel being deactivated
 *
 * Any active transfer is paused and all pending hardware events are cleared.
 * The current transfer may not be resumed, and the channel's Parameter RAM
 * should be reinitialized before being reused.
 */
static void edma_stop(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ecc, SH_EECR, j, mask);
		edma_shadow0_write_array(ecc, SH_ECR, j, mask);
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);

		/* clear possibly pending completion interrupt */
		edma_shadow0_write_array(ecc, SH_ICR, j, mask);

		dev_dbg(ecc->dev, "EER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_EER, j));

		/* REVISIT: consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ecc, SH_EECR, channel >> 5, mask);
	}
}
/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ecc, SH_EESR, channel >> 5, mask);
	}
}
static int edma_trigger_channel(struct edma_cc *ecc, unsigned channel)
{
	unsigned int mask;

	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return -EINVAL;
	}
	channel = EDMA_CHAN_SLOT(channel);
	mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);

	dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
		edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
	return 0;
}
static void edma_clean_channel(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < ecc->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		dev_dbg(ecc->dev, "EMR%d %08x\n", j,
			edma_read_array(ecc, EDMA_EMR, j));
		edma_shadow0_write_array(ecc, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}
/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @ecc: pointer to edma_cc struct
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel. Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel. When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining. When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set. Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
static int edma_alloc_channel(struct edma_cc *ecc, int channel,
			      enum dma_event_q eventq_no)
{
	int ret = 0;

	if (!ecc->unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
				       prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		ecc->unused_chan_list_done = true;
	}

	if (channel >= 0) {
		if (ecc->id != EDMA_CTLR(channel)) {
			dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n",
				__func__, ecc->id, EDMA_CTLR(channel));
			return -EINVAL;
		}
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		channel = find_next_bit(ecc->channel_unused, ecc->num_channels,
					0);
		if (channel == ecc->num_channels)
			return -EBUSY;
	} else if (channel >= ecc->num_channels) {
		return -EINVAL;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(ecc, EDMA_CTLR_CHAN(ecc->id, channel));

	edma_setup_interrupt(ecc, EDMA_CTLR_CHAN(ecc->id, channel), true);

	edma_map_dmach_to_queue(ecc, channel, eventq_no);

	return EDMA_CTLR_CHAN(ecc->id, channel);
}
/**
 * edma_free_channel - deallocate DMA channel
 * @ecc: pointer to edma_cc struct
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
static void edma_free_channel(struct edma_cc *ecc, unsigned channel)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= ecc->num_channels)
		return;

	/* REVISIT should probably take out of shadow region 0 */
	edma_setup_interrupt(ecc, channel, false);
}
/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_cc *ecc, unsigned channel,
				       enum dma_event_q eventq_no)
{
	if (ecc->id != EDMA_CTLR(channel)) {
		dev_err(ecc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__,
			ecc->id, EDMA_CTLR(channel));
		return;
	}
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= ecc->num_channels)
		return;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)
		return;

	edma_map_dmach_to_queue(ecc, channel, eventq_no);
}
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%d\n"
			 "  ccnt\t%d\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%d\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions, set up a
	 * link to the dummy slot; this results in all future events being
	 * absorbed, and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of MAX_NR_SG
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(ecc, echan->ch_num);
		edma_stop(ecc, echan->ch_num);
		edma_start(ecc, echan->ch_num);
		edma_trigger_channel(ecc, echan->ch_num);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(ecc, echan->ch_num);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(ecc, echan->ch_num);
	}
}
static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_dma() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan->ecc, echan->ch_num);
		/* Move the cyclic channel back to default queue */
		if (echan->edesc->cyclic)
			edma_assign_channel_eventq(echan->ecc, echan->ch_num,
						   EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}
static int edma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
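/*
 * Example client usage (sketch; 'chan' obtained via dma_request_channel()
 * and the FIFO address is hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 1,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */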
static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan->ecc, echan->ch_num);
	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan->ecc, echan->ch_num);
	return 0;
}
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up.
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: How much is the dev_width
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    enum dma_slave_buswidth dev_width,
			    unsigned int dma_length,
			    enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: in the A-sync transfer only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the approach adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K-1. This
		 * is assured as bcntrld = 0xffff at the end of the function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * Only time when (bcntrld) auto reload is required is for
	 * A-sync case, and in this case, a requirement of reload value
	 * of SZ_64K-1 only is assured. 'link' is initially set to NULL
	 * and then later will be populated by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}
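/*
 * Worked example for the A-sync math above (illustrative numbers):
 * dev_width = 4 bytes, burst = 1, dma_length = 400000 bytes.
 * acnt = 4 and dma_length / acnt = 100000 elements, so
 * ccnt = 100000 / 65535 = 1 and bcnt = 100000 - 1 * 65535 = 34465.
 * bcnt != 0, so ccnt becomes 2: the first frame moves 34465 elements and
 * the 0xffff reload value makes the remaining frame move 65535.
 */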
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/* If this is the last in a current SG set of transactions,
		 * enable interrupts so that the next set is processed.
		 */
		if (!((i + 1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret;
	enum dma_slave_buswidth width;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);

	if (unlikely(!echan || !len))
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = 1;

	width = 1 << __ffs((src | dest | len));
	if (width > DMA_SLAVE_BUSWIDTH_64_BYTES)
		width = DMA_SLAVE_BUSWIDTH_64_BYTES;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       width, len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	/*
	 * Enable intermediate transfer chaining to re-trigger channel
	 * on completion of every TR, and enable transfer-completion
	 * interrupt on completion of the whole transfer.
	 */
	edesc->pset[0].param.opt |= ITCCHEN;
	edesc->pset[0].param.opt |= TCINTEN;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
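/*
 * Example (illustrative addresses): for src = 0x80001000, dest = 0x80002000
 * and len = 4096, (src | dest | len) has its lowest set bit at bit 12, so
 * the computed width of 4096 is clamped down to DMA_SLAVE_BUSWIDTH_64_BYTES
 * before the pset is configured.
 */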
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%d\n"
			 "  ccnt\t%d\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%d\n",
			 i, echan->ch_num, echan->slot[i],
			 edesc->pset[i].param.opt,
			 edesc->pset[i].param.src,
			 edesc->pset[i].param.dst,
			 edesc->pset[i].param.a_b_cnt,
			 edesc->pset[i].param.ccnt,
			 edesc->pset[i].param.src_dst_bidx,
			 edesc->pset[i].param.src_dst_cidx,
			 edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[i].param.opt |= TCINTEN;
	}

	/* Place the cyclic channel to highest priority queue */
	edma_assign_channel_eventq(echan->ecc, echan->ch_num, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
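/*
 * Example (illustrative): an ALSA ring buffer of buf_len = 64 KiB split
 * into period_len = 8 KiB periods needs nslots = 64/8 + 1 = 9 PaRAM
 * slots: eight period psets plus the extra slot that closes the loop.
 */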
static void edma_completion_handler(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc = echan->edesc;

	if (!edesc)
		return;

	spin_lock(&echan->vchan.lock);
	if (edesc->cyclic) {
		vchan_cyclic_callback(&edesc->vdesc);
		spin_unlock(&echan->vchan.lock);
		return;
	} else if (edesc->processed == edesc->pset_nr) {
		edesc->residue = 0;
		edma_stop(ecc, echan->ch_num);
		vchan_cookie_complete(&edesc->vdesc);
		echan->edesc = NULL;

		dev_dbg(dev, "Transfer completed on channel %d\n",
			echan->ch_num);
	} else {
		dev_dbg(dev, "Sub transfer completed on channel %d\n",
			echan->ch_num);

		edma_pause(ecc, echan->ch_num);

		/* Update statistics for tx_status */
		edesc->residue -= edesc->sg_len;
		edesc->residue_stat = edesc->residue;
		edesc->processed_stat = edesc->processed;
	}
	edma_execute(echan);

	spin_unlock(&echan->vchan.lock);
}
/* eDMA interrupt handler */
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);
		}
	} while (sh_ipr);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
static void edma_error_handler(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edmacc_param p;

	if (!echan->edesc)
		return;

	spin_lock(&echan->vchan.lock);

	edma_read_slot(ecc, echan->slot[0], &p);
	/*
	 * Issue later based on missed flag which will be sure
	 * to happen as:
	 * (1) we finished transmitting an intermediate slot and
	 * edma_execute is coming up.
	 * (2) or we finished current transfer and issue will
	 * call edma_execute.
	 *
	 * Important note: issuing can be dangerous here and
	 * lead to some nasty recursion when we are in a NULL
	 * slot. So we avoid doing so and set the missed flag.
	 */
	if (p.a_b_cnt == 0 && p.ccnt == 0) {
		dev_dbg(dev, "Error on null slot, setting miss\n");
		echan->missed = 1;
	} else {
		/*
		 * The slot is already programmed but the event got
		 * missed, so it's safe to issue it here.
		 */
		dev_dbg(dev, "Missed event, TRIGGERING\n");
		edma_clean_channel(ecc, echan->ch_num);
		edma_stop(ecc, echan->ch_num);
		edma_start(ecc, echan->ch_num);
		edma_trigger_channel(ecc, echan->ch_num);
	}

	spin_unlock(&echan->vchan.lock);
}
static inline bool edma_error_pending(struct edma_cc *ecc)
{
	if (edma_read_array(ecc, EDMA_EMR, 0) ||
	    edma_read_array(ecc, EDMA_EMR, 1) ||
	    edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
		return true;

	return false;
}
/* eDMA error interrupt handler */
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int i, j;
	int ctlr;
	unsigned int cnt = 0;
	unsigned int val;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");

	if (!edma_error_pending(ecc))
		return IRQ_NONE;

	while (1) {
		/* Event missed register(s) */
		for (j = 0; j < 2; j++) {
			unsigned long emr;

			val = edma_read_array(ecc, EDMA_EMR, j);
			if (!val)
				continue;

			dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
			emr = val;
			for (i = find_next_bit(&emr, 32, 0); i < 32;
			     i = find_next_bit(&emr, 32, i + 1)) {
				int k = (j << 5) + i;

				/* Clear the corresponding EMR bits */
				edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
				/* Clear any SER */
				edma_shadow0_write_array(ecc, SH_SECR, j,
							 BIT(i));
				edma_error_handler(&ecc->slave_chans[k]);
			}
		}

		val = edma_read(ecc, EDMA_QEMR);
		if (val) {
			dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_QEMCR, val);
			edma_shadow0_write(ecc, SH_QSECR, val);
		}

		val = edma_read(ecc, EDMA_CCERR);
		if (val) {
			dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_CCERRCLR, val);
		}

		if (!edma_error_pending(ecc))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ecc, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;

	a_ch_num = edma_alloc_channel(echan->ecc, echan->ch_num,
				      EVENTQ_DEFAULT);
	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = edma_alloc_slot(echan->ecc, echan->ch_num);
	if (echan->slot[0] < 0) {
		dev_err(dev, "Entry slot allocation failed for channel %u\n",
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENOMEM;
		goto err_wrong_chan;
	}

	/* Set up channel -> slot mapping for the entry slot */
	edma_set_chmap(echan->ecc, echan->ch_num, echan->slot[0]);

	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(echan->ecc, a_ch_num);
err_no_chan:
	return ret;
}
/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	int i;

	/* Terminate transfers */
	edma_stop(echan->ecc, echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 0; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->ecc, echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Set entry slot to the dummy slot */
	edma_set_chmap(echan->ecc, echan->ch_num, echan->ecc->dummy_slot);

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ecc, echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(chan->device->dev, "freeing channel for %u\n", echan->ch_num);
}
/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	int i;

	/*
	 * We always read the dst/src position from the first PaRAM
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}
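/*
 * Worked example (illustrative): for a 3-element SG list of 4 KiB each,
 * residue_stat starts at 12288. If the read position falls 1 KiB into the
 * second pset, the loop marks the first pset done (residue_stat becomes
 * 8192) and reports a residue of 8192 - 1024 = 7168 bytes.
 */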
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}
static void __init edma_chan_init(struct edma_cc *ecc, struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &echans[i];

		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}
#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_config = edma_slave_config;
	dma->device_pause = edma_dma_pause;
	dma->device_resume = edma_dma_resume;
	dma->device_terminate_all = edma_terminate_all;

	dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");

	/* Nothing needs to be done if the queue priority mapping is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}
#if IS_ENABLED(CONFIG_OF)
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
			       int sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(dev->of_node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
					 nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
	return 0;
}
static int edma_of_parse_dt(struct device *dev, struct edma_soc_info *pdata)
{
	int ret = 0;
	int sz;
	struct property *prop;
	struct edma_rsv_info *rsv_info;

	rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
	if (!rsv_info)
		return -ENOMEM;
	pdata->rsv = rsv_info;

	prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", &sz);
	if (prop)
		ret = edma_xbar_event_map(dev, pdata, sz);

	return ret;
}
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
{
	struct edma_soc_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = edma_of_parse_dt(dev, info);
	if (ret)
		return ERR_PTR(ret);

	return info;
}
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif
static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info *info = pdev->dev.platform_data;
	s8 (*queue_priority_mapping)[2];
	int i, off, ln;
	const s16 (*rsv_chans)[2];
	const s16 (*rsv_slots)[2];
	const s16 (*xbar_chans)[2];
	int irq;
	char *irq_name;
	struct resource *mem;
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc;
	int ret;

	if (node) {
		info = edma_setup_info_from_dt(dev);
		if (IS_ERR(info)) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);
		}
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->dev = dev;
	ecc->id = pdev->id;
	/* When booting with DT the pdev->id is -1 */
	if (ecc->id < 0)
		ecc->id = 0;

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
	if (!mem) {
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem) {
			dev_err(dev, "no mem resource?\n");
			return -ENODEV;
		}
	}
	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);
	if (ret)
		return ret;

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);
	if (!ecc->slave_chans)
		return -ENOMEM;

	ecc->channel_unused = devm_kcalloc(dev,
					   BITS_TO_LONGS(ecc->num_channels),
					   sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->channel_unused)
		return -ENOMEM;

	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->slot_inuse)
		return -ENOMEM;

	ecc->default_queue = info->default_queue;

	for (i = 0; i < ecc->num_slots; i++)
		edma_write_slot(ecc, i, &dummy_paramset);

	/* Mark all channels as unused (note: channel_unused is a pointer, so
	 * the full bitmap size must be used here, not sizeof() the pointer).
	 */
	memset(ecc->channel_unused, 0xff,
	       BITS_TO_LONGS(ecc->num_channels) * sizeof(unsigned long));

	if (info->rsv) {
		/* Clear the reserved channels in unused list */
		rsv_chans = info->rsv->rsv_chans;
		if (rsv_chans) {
			for (i = 0; rsv_chans[i][0] != -1; i++) {
				off = rsv_chans[i][0];
				ln = rsv_chans[i][1];
				clear_bits(off, ln, ecc->channel_unused);
			}
		}

		/* Set the reserved slots in inuse list */
		rsv_slots = info->rsv->rsv_slots;
		if (rsv_slots) {
			for (i = 0; rsv_slots[i][0] != -1; i++) {
				off = rsv_slots[i][0];
				ln = rsv_slots[i][1];
				set_bits(off, ln, ecc->slot_inuse);
			}
		}
	}

	/* Clear the xbar mapped channels in unused list */
	xbar_chans = info->xbar_chans;
	if (xbar_chans) {
		for (i = 0; xbar_chans[i][1] != -1; i++) {
			off = xbar_chans[i][1];
			clear_bits(off, 1, ecc->channel_unused);
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
	}

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	for (i = 0; i < ecc->num_channels; i++) {
		/* Assign all channels to the default queue */
		edma_map_dmach_to_queue(ecc, i, info->default_queue);
		/* Set entry slot to the dummy slot */
		edma_set_chmap(ecc, i, ecc->dummy_slot);
	}

	queue_priority_mapping = info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_region; i++) {
		edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
		edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
		edma_write_array(ecc, EDMA_QRAE, i, 0x0);
	}
	ecc->info = info;

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	if (node)
		of_dma_controller_register(node, of_dma_xlate_by_chan_id,
					   &ecc->dma_slave);

	dev_info(dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc, ecc->dummy_slot);
	return ret;
}
static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc, ecc->dummy_slot);

	return 0;
}
2289 static int edma_pm_resume(struct device
*dev
)
2291 struct edma_cc
*ecc
= dev_get_drvdata(dev
);
2292 struct edma_chan
*echan
= ecc
->slave_chans
;
2294 s8 (*queue_priority_mapping
)[2];
2296 queue_priority_mapping
= ecc
->info
->queue_priority_mapping
;
2298 /* Event queue priority mapping */
2299 for (i
= 0; queue_priority_mapping
[i
][0] != -1; i
++)
2300 edma_assign_priority_to_queue(ecc
, queue_priority_mapping
[i
][0],
2301 queue_priority_mapping
[i
][1]);
2303 for (i
= 0; i
< ecc
->num_channels
; i
++) {
2304 if (echan
[i
].alloced
) {
2305 /* ensure access through shadow region 0 */
2306 edma_or_array2(ecc
, EDMA_DRAE
, 0, i
>> 5,
2309 edma_setup_interrupt(ecc
, EDMA_CTLR_CHAN(ecc
->id
, i
),
2312 /* Set up channel -> slot mapping for the entry slot */
2313 edma_set_chmap(ecc
, echan
[i
].ch_num
, echan
[i
].slot
[0]);
2321 static const struct dev_pm_ops edma_pm_ops
= {
2322 SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL
, edma_pm_resume
)
static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;

		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
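/*
 * Example usage of the filter from a client driver (sketch; the channel
 * number is hypothetical):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	unsigned int req = EDMA_CTLR_CHAN(0, 20);
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &req);
 */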
static int edma_init(void)
{
	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");