/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */
#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		bool _is_slave = is_slave_direction(_dwc->direction);	\
		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dwc->dst_master)		\
		 | DWC_CTLL_SMS(_dwc->src_master));		\
	})
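/*
 * Note (summary of how the macro is used below): the prep routines OR
 * this default value with their own transfer-width, address-increment
 * and flow-control bits, so the macro itself only provides burst sizes,
 * LLP enables and the source/destination master selection.
 */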
/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64
/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
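/*
 * Note: a descriptor is only reused after the client has ACKed it
 * (async_tx_test_ack() above); descriptors still owned by a client are
 * skipped and stay on the free list until they are acknowledged.
 */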
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
	} else {
		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/
static inline unsigned int dwc_fast_fls(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}
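/*
 * Example: if src, dst and len are all multiples of 4 but not of 8, the
 * value src | dst | len passed in here is divisible by 4 only, so this
 * returns 2 and the prep routines use 1 << 2 = 4 byte (32-bit) transfers.
 */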
static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/
/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, desc->lli.sar);
	channel_writel(dwc, DAR, desc->lli.dar);
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}
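/*
 * Note: in this soft LLP path every block interrupts (DWC_CTLL_INT_EN is
 * forced above) and dwc_scan_descriptors() then submits the next block
 * from tx_node_active, emulating in software what hardware LLP chaining
 * does with a linked descriptor list.
 */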
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		dwc->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback	callback = NULL;
	void			*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc		*child;
	unsigned long		flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	dma_descriptor_unmap(txd);
	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback)
		callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
}
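/*
 * BLOCK_TS in CTL_HI counts completed source transfers and bits 6:4 of
 * CTL_LO (SRC_TR_WIDTH) give the transfer width as a power of two, so
 * e.g. BLOCK_TS = 16 with a 32-bit source width means 16 * (1 << 2) =
 * 64 bytes have been read from the source.
 */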
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update desc to reflect last sent one */
				if (active != head->next)
					desc = to_dw_desc(active->prev);

				dwc->residue -= desc->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		dwc->residue = 0;

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		dwc->residue = 0;
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		dwc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			dwc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		dwc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (child->lli.llp == llp) {
				/* Currently in progress */
				dwc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			dwc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}
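/*
 * Residue bookkeeping above: dwc->residue starts at the descriptor's
 * total_len, every child already sent subtracts its own len, and the
 * block currently in flight additionally subtracts dwc_get_sent();
 * dwc_tx_status() reports the remaining value to the client.
 */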
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */

dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan),
			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
			status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* Make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status = dma_readl(dw, STATUS_INT);

	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status || !dw->in_use)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}
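/*
 * The hard IRQ handler above only masks the channel interrupts and
 * defers the real work; dw_dma_tasklet() walks the channels and
 * re-enables MASK.XFER/MASK.ERROR once the bottom half is done.
 */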
/*----------------------------------------------------------------------*/
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
	list_add_tail(&desc->desc_node, &dwc->queue);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
			   dw->data_width[dwc->dst_master]);

	src_width = dst_width = min_t(unsigned int, data_width,
				      dwc_fast_fls(src | dest | len));

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;
		desc->len = xfer_count << src_width;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
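/*
 * Example (illustrative numbers): with a 4095-transfer block size and
 * 32-bit transfers, a 64 KiB copy is 16384 transfers and gets split into
 * five hardware blocks chained through lli.llp; only the last block gets
 * DWC_CTLL_INT_EN when DMA_PREP_INTERRUPT is requested.
 */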
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		data_width = dw->data_width[dwc->src_master];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		data_width = dw->data_width[dwc->dst_master];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (!dws || dws->dma_dev != chan->device->dev)
		return false;

	/* We have to copy data since dws can be temporary storage */

	dwc->src_id = dws->src_id;
	dwc->dst_id = dws->dst_id;

	dwc->src_master = dws->src_master;
	dwc->dst_master = dws->dst_master;

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding least significant bit set: n & (n - 1)
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
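/*
 * Resulting values (maxburst in transfers -> CTLx MSIZE field):
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3; anything <= 1 falls back to 0.
 */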
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(sconfig->direction))
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
	dwc->direction = sconfig->direction;

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	unsigned long		flags;
	unsigned int		count = 20;	/* timeout iterations */
	u32			cfglo;

	spin_lock_irqsave(&dwc->lock, flags);

	cfglo = channel_readl(dwc, CFG_LO);
	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	dwc->paused = true;

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

	dwc->paused = false;
}
static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	if (!dwc->paused)
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_resume(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}
static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
{
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	residue = dwc->residue;
	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
		residue -= dwc_get_sent(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return residue;
}
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE)
		dma_set_residue(txstate, dwc_get_residue(dwc));

	if (dwc->paused && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;

	return ret;
}
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*----------------------------------------------------------------------*/
static void dw_dma_off(struct dw_dma *dw)
{
	unsigned int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}
static void dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/* Enable controller here if needed */
	if (!dw->in_use)
		dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		dma_addr_t phys;

		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
		if (!desc)
			goto err_desc_alloc;

		memset(desc, 0, sizeof(struct dw_desc));

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;

		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;

err_desc_alloc:
	dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);

	return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was a last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		dw_dma_off(dw);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
/* --------------------- Cyclic DMA API extensions -------------------- */
/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* Assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* Setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);
/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (unlikely(!is_slave_direction(direction)))
		goto out_err;

	dwc->direction = direction;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last)
			last->lli.llp = desc->txd.phys;

		last = desc;
	}

	/* Let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;

	dev_dbg(chan2dev(&dwc->chan),
			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
			&buf_addr, buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc->desc);
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	unsigned int		i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
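/*
 * Typical use of the cyclic extensions (illustrative sketch only, not
 * copied from an in-tree user): after configuring the channel with
 * dmaengine_slave_config(), a client would do roughly
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (!IS_ERR(cdesc)) {
 *		cdesc->period_callback = my_period_done;
 *		cdesc->period_callback_param = my_data;
 *		dw_dma_cyclic_start(chan);
 *	}
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 *
 * with the period callback invoked from dwc_handle_cyclic() once per
 * period. my_period_done/my_data are hypothetical names.
 */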
/*----------------------------------------------------------------------*/
int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
{
	struct dw_dma		*dw;
	bool			autocfg;
	unsigned int		dw_params;
	unsigned int		nr_channels;
	unsigned int		max_blk_size = 0;
	int			err;
	int			i;

	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->regs = chip->regs;
	chip->dw = dw;

	pm_runtime_get_sync(chip->dev);

	dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
	autocfg = dw_params >> DW_PARAMS_EN & 0x1;

	dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

	if (!pdata && autocfg) {
		pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata) {
			err = -ENOMEM;
			goto err_pdata;
		}

		/* Fill platform data with the default values */
		pdata->is_private = true;
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		err = -EINVAL;
		goto err_pdata;
	}

	if (autocfg)
		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
	else
		nr_channels = pdata->nr_channels;

	dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		err = -ENOMEM;
		goto err_pdata;
	}

	/* Get hardware configuration parameters */
	if (autocfg) {
		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);

		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < dw->nr_masters; i++) {
			dw->data_width[i] =
				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
		}
	} else {
		dw->nr_masters = pdata->nr_masters;
		for (i = 0; i < dw->nr_masters; i++)
			dw->data_width[i] = pdata->data_width[i];
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << nr_channels) - 1;

	/* Force dma off, just in case */
	dw_dma_off(dw);

	/* Disable BLOCK interrupts as well */
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pdata;
	}

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  "dw_dmac", dw);
	if (err)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];
		int			r = nr_channels - i - 1;

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = r;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int dwc_params;
			void __iomem *addr = chip->regs + r * sizeof(u32);

			dwc_params = dma_read_byaddr(addr, DWC_PARAMS);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
					   dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
			dwc->block_size =
				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
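			/*
			 * For example, an encoded value of 0x0 gives
			 * (4 << 0) - 1 = 3 and 0xa gives (4 << 10) - 1 =
			 * 4095, matching the range described above.
			 */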
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;

			/* Check if channel supports multi block transfer */
			channel_writel(dwc, LLP, 0xfffffffc);
			dwc->nollp =
				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
			channel_writel(dwc, LLP, 0);
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;

	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	/* DMA capabilities */
	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			     BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return err;
}
EXPORT_SYMBOL_GPL(dw_dma_probe);
int dw_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_chan *dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);
int dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_off(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_disable);
int dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_on(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_enable);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");