/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
{
        return slave ? slave->dst_master : 0;
}

static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
{
        return slave ? slave->src_master : 1;
}

static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
{
        struct dw_dma *dw = to_dw_dma(chan->device);
        struct dw_dma_slave *dws = chan->private;
        unsigned int m;

        if (master == SRC_MASTER)
                m = dwc_get_sms(dws);
        else
                m = dwc_get_dms(dws);

        return min_t(unsigned int, dw->nr_masters - 1, m);
}

#define DWC_DEFAULT_CTLLO(_chan) ({                             \
                struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);       \
                struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
                bool _is_slave = is_slave_direction(_dwc->direction);   \
                int _dms = dwc_get_master(_chan, DST_MASTER);           \
                int _sms = dwc_get_master(_chan, SRC_MASTER);           \
                u8 _smsize = _is_slave ? _sconfig->src_maxburst :       \
                        DW_DMA_MSIZE_16;                                \
                u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :       \
                        DW_DMA_MSIZE_16;                                \
                                                                        \
                (DWC_CTLL_DST_MSIZE(_dmsize)                            \
                 | DWC_CTLL_SRC_MSIZE(_smsize)                          \
                 | DWC_CTLL_LLP_D_EN                                    \
                 | DWC_CTLL_LLP_S_EN                                    \
                 | DWC_CTLL_DMS(_dms)                                   \
                 | DWC_CTLL_SMS(_sms));                                 \
        })

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL    64

static inline unsigned int dwc_get_data_width(struct dma_chan *chan,
                                              int master)
{
        struct dw_dma *dw = to_dw_dma(chan->device);

        return dw->data_width[dwc_get_master(chan, master)];
}

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
        return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
        return to_dw_desc(dwc->active_list.next);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
        struct dw_desc *desc, *_desc;
        struct dw_desc *ret = NULL;
        unsigned int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&dwc->lock, flags);
        list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
        }
        spin_unlock_irqrestore(&dwc->lock, flags);

        dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

        return ret;
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
        unsigned long flags;

        if (desc) {
                struct dw_desc *child;

                spin_lock_irqsave(&dwc->lock, flags);
                list_for_each_entry(child, &desc->tx_list, desc_node)
                        dev_vdbg(chan2dev(&dwc->chan),
                                        "moving child desc %p to freelist\n",
                                        child);
                list_splice_init(&desc->tx_list, &dwc->free_list);
                dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &dwc->free_list);
                spin_unlock_irqrestore(&dwc->lock, flags);
        }
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
        struct dw_dma_slave *dws = dwc->chan.private;
        u32 cfghi = DWC_CFGH_FIFO_MODE;
        u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

        if (dwc->initialized == true)
                return;

        if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) {
                /* autoconfigure based on request line from DT */
                if (dwc->direction == DMA_MEM_TO_DEV)
                        cfghi = DWC_CFGH_DST_PER(dwc->request_line);
                else if (dwc->direction == DMA_DEV_TO_MEM)
                        cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
        } else if (dws) {
                /*
                 * We need controller-specific data to set up slave
                 * transfers.
                 */
                BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

                cfghi = dws->cfg_hi;
                cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
        } else {
                if (dwc->direction == DMA_MEM_TO_DEV)
                        cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
                else if (dwc->direction == DMA_DEV_TO_MEM)
                        cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
        }

        channel_writel(dwc, CFG_LO, cfglo);
        channel_writel(dwc, CFG_HI, cfghi);

        /* Enable interrupts */
        channel_set_bit(dw, MASK.XFER, dwc->mask);
        channel_set_bit(dw, MASK.ERROR, dwc->mask);

        dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

static inline unsigned int dwc_fast_fls(unsigned long long v)
{
        /*
         * We can be a lot more clever here, but this should take care
         * of the most common optimization.
         */
        if (!(v & 7))
                return 3;
        else if (!(v & 3))
                return 2;
        else if (!(v & 1))
                return 1;
        return 0;
}

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
        dev_err(chan2dev(&dwc->chan),
                "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
                channel_readl(dwc, SAR),
                channel_readl(dwc, DAR),
                channel_readl(dwc, LLP),
                channel_readl(dwc, CTL_HI),
                channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
        channel_clear_bit(dw, CH_EN, dwc->mask);
        while (dma_readl(dw, CH_EN) & dwc->mask)
                cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
                                       struct dw_desc *desc)
{
        struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
        u32             ctllo;

        /* Software emulation of LLP mode relies on interrupts to continue
         * multi block transfer. */
        ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

        channel_writel(dwc, SAR, desc->lli.sar);
        channel_writel(dwc, DAR, desc->lli.dar);
        channel_writel(dwc, CTL_LO, ctllo);
        channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
        channel_set_bit(dw, CH_EN, dwc->mask);

        /* Move pointer to next descriptor */
        dwc->tx_node_active = dwc->tx_node_active->next;
}

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
        struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
        unsigned long   was_soft_llp;

        /* ASSERT:  channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_err(chan2dev(&dwc->chan),
                        "BUG: Attempted to start non-idle channel\n");
                dwc_dump_chan_regs(dwc);

                /* The tasklet will hopefully advance the queue... */
                return;
        }

        if (dwc->nollp) {
                was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
                                                &dwc->flags);
                if (was_soft_llp) {
                        dev_err(chan2dev(&dwc->chan),
                                "BUG: Attempted to start new LLP transfer "
                                "inside ongoing one\n");
                        return;
                }

                dwc_initialize(dwc);

                dwc->residue = first->total_len;
                dwc->tx_node_active = &first->tx_list;

                /* Submit first block */
                dwc_do_single_block(dwc, first);

                return;
        }

        dwc_initialize(dwc);

        channel_writel(dwc, LLP, first->txd.phys);
        channel_writel(dwc, CTL_LO,
                        DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
        channel_writel(dwc, CTL_HI, 0);
        channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
                bool callback_required)
{
        dma_async_tx_callback   callback = NULL;
        void                    *param = NULL;
        struct dma_async_tx_descriptor  *txd = &desc->txd;
        struct dw_desc          *child;
        unsigned long           flags;

        dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

        spin_lock_irqsave(&dwc->lock, flags);
        dma_cookie_complete(txd);
        if (callback_required) {
                callback = txd->callback;
                param = txd->callback_param;
        }

        /* async_tx_ack */
        list_for_each_entry(child, &desc->tx_list, desc_node)
                async_tx_ack(&child->txd);
        async_tx_ack(&desc->txd);

        list_splice_init(&desc->tx_list, &dwc->free_list);
        list_move(&desc->desc_node, &dwc->free_list);

        if (!is_slave_direction(dwc->direction)) {
                struct device *parent = chan2parent(&dwc->chan);
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                        if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
                                dma_unmap_single(parent, desc->lli.dar,
                                        desc->total_len, DMA_FROM_DEVICE);
                        else
                                dma_unmap_page(parent, desc->lli.dar,
                                        desc->total_len, DMA_FROM_DEVICE);
                }
                if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                        if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
                                dma_unmap_single(parent, desc->lli.sar,
                                        desc->total_len, DMA_TO_DEVICE);
                        else
                                dma_unmap_page(parent, desc->lli.sar,
                                        desc->total_len, DMA_TO_DEVICE);
                }
        }

        spin_unlock_irqrestore(&dwc->lock, flags);

        if (callback)
                callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
        struct dw_desc *desc, *_desc;
        LIST_HEAD(list);
        unsigned long flags;

        spin_lock_irqsave(&dwc->lock, flags);
        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_err(chan2dev(&dwc->chan),
                        "BUG: XFER bit set, but channel not idle!\n");

                /* Try to continue after resetting the channel... */
                dwc_chan_disable(dw, dwc);
        }

        /*
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
         */
        list_splice_init(&dwc->active_list, &list);
        if (!list_empty(&dwc->queue)) {
                list_move(dwc->queue.next, &dwc->active_list);
                dwc_dostart(dwc, dwc_first_active(dwc));
        }

        spin_unlock_irqrestore(&dwc->lock, flags);

        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
        u32 ctlhi = channel_readl(dwc, CTL_HI);
        u32 ctllo = channel_readl(dwc, CTL_LO);

        return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
}
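
/*
 * Worked example for dwc_get_sent() above (illustrative values, not taken
 * from the databook): with BLOCK_TS = 0x20 in CTL_HI and SRC_TR_WIDTH = 2
 * (32-bit transfers) in CTL_LO bits 6:4, the source has already sent
 * 0x20 * (1 << 2) = 128 bytes of the current block.
 */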
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
        dma_addr_t llp;
        struct dw_desc *desc, *_desc;
        struct dw_desc *child;
        u32 status_xfer;
        unsigned long flags;

        spin_lock_irqsave(&dwc->lock, flags);
        llp = channel_readl(dwc, LLP);
        status_xfer = dma_readl(dw, RAW.XFER);

        if (status_xfer & dwc->mask) {
                /* Everything we've submitted is done */
                dma_writel(dw, CLEAR.XFER, dwc->mask);

                if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
                        struct list_head *head, *active = dwc->tx_node_active;

                        /*
                         * We are inside first active descriptor.
                         * Otherwise something is really wrong.
                         */
                        desc = dwc_first_active(dwc);

                        head = &desc->tx_list;
                        if (active != head) {
                                /* Update desc to reflect last sent one */
                                if (active != head->next)
                                        desc = to_dw_desc(active->prev);

                                dwc->residue -= desc->len;

                                child = to_dw_desc(active);

                                /* Submit next block */
                                dwc_do_single_block(dwc, child);

                                spin_unlock_irqrestore(&dwc->lock, flags);
                                return;
                        }

                        /* We are done here */
                        clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
                }

                dwc->residue = 0;

                spin_unlock_irqrestore(&dwc->lock, flags);

                dwc_complete_all(dw, dwc);
                return;
        }

        if (list_empty(&dwc->active_list)) {
                dwc->residue = 0;
                spin_unlock_irqrestore(&dwc->lock, flags);
                return;
        }

        if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
                dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
                spin_unlock_irqrestore(&dwc->lock, flags);
                return;
        }

        dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
                        (unsigned long long)llp);

        list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
                /* initial residue value */
                dwc->residue = desc->total_len;

                /* check first descriptors addr */
                if (desc->txd.phys == llp) {
                        spin_unlock_irqrestore(&dwc->lock, flags);
                        return;
                }

                /* check first descriptors llp */
                if (desc->lli.llp == llp) {
                        /* This one is currently in progress */
                        dwc->residue -= dwc_get_sent(dwc);
                        spin_unlock_irqrestore(&dwc->lock, flags);
                        return;
                }

                dwc->residue -= desc->len;
                list_for_each_entry(child, &desc->tx_list, desc_node) {
                        if (child->lli.llp == llp) {
                                /* Currently in progress */
                                dwc->residue -= dwc_get_sent(dwc);
                                spin_unlock_irqrestore(&dwc->lock, flags);
                                return;
                        }
                        dwc->residue -= child->len;
                }

                /*
                 * No descriptors so far seem to be in progress, i.e.
                 * this one must be done.
                 */
                spin_unlock_irqrestore(&dwc->lock, flags);
                dwc_descriptor_complete(dwc, desc, true);
                spin_lock_irqsave(&dwc->lock, flags);
        }

        dev_err(chan2dev(&dwc->chan),
                "BUG: All descriptors done, but channel not idle!\n");

        /* Try to continue after resetting the channel... */
        dwc_chan_disable(dw, dwc);

        if (!list_empty(&dwc->queue)) {
                list_move(dwc->queue.next, &dwc->active_list);
                dwc_dostart(dwc, dwc_first_active(dwc));
        }
        spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
        dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
                 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
        struct dw_desc *bad_desc;
        struct dw_desc *child;
        unsigned long flags;

        dwc_scan_descriptors(dw, dwc);

        spin_lock_irqsave(&dwc->lock, flags);

        /*
         * The descriptor currently at the head of the active list is
         * borked. Since we don't have any way to report errors, we'll
         * just have to scream loudly and try to carry on.
         */
        bad_desc = dwc_first_active(dwc);
        list_del_init(&bad_desc->desc_node);
        list_move(dwc->queue.next, dwc->active_list.prev);

        /* Clear the error flag and try to restart the controller */
        dma_writel(dw, CLEAR.ERROR, dwc->mask);
        if (!list_empty(&dwc->active_list))
                dwc_dostart(dwc, dwc_first_active(dwc));

        /*
         * WARN may seem harsh, but since this only happens
         * when someone submits a bad physical address in a
         * descriptor, we should consider ourselves lucky that the
         * controller flagged an error instead of scribbling over
         * random memory locations.
         */
        dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
                                       "  cookie: %d\n", bad_desc->txd.cookie);
        dwc_dump_lli(dwc, &bad_desc->lli);
        list_for_each_entry(child, &bad_desc->tx_list, desc_node)
                dwc_dump_lli(dwc, &child->lli);

        spin_unlock_irqrestore(&dwc->lock, flags);

        /* Pretend the descriptor completed successfully */
        dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
                u32 status_err, u32 status_xfer)
{
        unsigned long flags;

        if (dwc->mask) {
                void (*callback)(void *param);
                void *callback_param;

                dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
                                channel_readl(dwc, LLP));

                callback = dwc->cdesc->period_callback;
                callback_param = dwc->cdesc->period_callback_param;

                if (callback)
                        callback(callback_param);
        }

        /*
         * Error and transfer complete are highly unlikely, and will most
         * likely be due to a configuration error by the user.
         */
        if (unlikely(status_err & dwc->mask) ||
                        unlikely(status_xfer & dwc->mask)) {
                int i;

                dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
                                "interrupt, stopping DMA transfer\n",
                                status_xfer ? "xfer" : "error");

                spin_lock_irqsave(&dwc->lock, flags);

                dwc_dump_chan_regs(dwc);

                dwc_chan_disable(dw, dwc);

                /* make sure DMA does not restart by loading a new list */
                channel_writel(dwc, LLP, 0);
                channel_writel(dwc, CTL_LO, 0);
                channel_writel(dwc, CTL_HI, 0);

                dma_writel(dw, CLEAR.ERROR, dwc->mask);
                dma_writel(dw, CLEAR.XFER, dwc->mask);

                for (i = 0; i < dwc->cdesc->periods; i++)
                        dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

                spin_unlock_irqrestore(&dwc->lock, flags);
        }
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
        struct dw_dma *dw = (struct dw_dma *)data;
        struct dw_dma_chan *dwc;
        u32 status_xfer;
        u32 status_err;
        int i;

        status_xfer = dma_readl(dw, RAW.XFER);
        status_err = dma_readl(dw, RAW.ERROR);

        dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

        for (i = 0; i < dw->dma.chancnt; i++) {
                dwc = &dw->chan[i];
                if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
                        dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
                else if (status_err & (1 << i))
                        dwc_handle_error(dw, dwc);
                else if (status_xfer & (1 << i))
                        dwc_scan_descriptors(dw, dwc);
        }

        /*
         * Re-enable interrupts.
         */
        channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
        channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
        struct dw_dma *dw = dev_id;
        u32 status;

        dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
                        dma_readl(dw, STATUS_INT));

        /*
         * Just disable the interrupts. We'll turn them back on in the
         * softirq handler.
         */
        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

        status = dma_readl(dw, STATUS_INT);
        if (status) {
                dev_err(dw->dma.dev,
                        "BUG: Unexpected interrupts pending: 0x%x\n",
                        status);

                /* Try to recover */
                channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
        }

        tasklet_schedule(&dw->tasklet);

        return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dw_desc          *desc = txd_to_dw_desc(tx);
        struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
        dma_cookie_t            cookie;
        unsigned long           flags;

        spin_lock_irqsave(&dwc->lock, flags);
        cookie = dma_cookie_assign(tx);

        /*
         * REVISIT: We should attempt to chain as many descriptors as
         * possible, perhaps even appending to those already submitted
         * for DMA. But this is hard to do in a race-free manner.
         */
        if (list_empty(&dwc->active_list)) {
                dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
                                desc->txd.cookie);
                list_add_tail(&desc->desc_node, &dwc->active_list);
                dwc_dostart(dwc, dwc_first_active(dwc));
        } else {
                dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
                                desc->txd.cookie);

                list_add_tail(&desc->desc_node, &dwc->queue);
        }

        spin_unlock_irqrestore(&dwc->lock, flags);

        return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_desc          *desc;
        struct dw_desc          *first;
        struct dw_desc          *prev;
        size_t                  xfer_count;
        size_t                  offset;
        unsigned int            src_width;
        unsigned int            dst_width;
        unsigned int            data_width;
        u32                     ctllo;

        dev_vdbg(chan2dev(chan),
                        "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
                        (unsigned long long)dest, (unsigned long long)src,
                        len, flags);

        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
                return NULL;
        }

        dwc->direction = DMA_MEM_TO_MEM;

        data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
                           dwc_get_data_width(chan, DST_MASTER));

        src_width = dst_width = min_t(unsigned int, data_width,
                                      dwc_fast_fls(src | dest | len));

        ctllo = DWC_DEFAULT_CTLLO(chan)
                        | DWC_CTLL_DST_WIDTH(dst_width)
                        | DWC_CTLL_SRC_WIDTH(src_width)
                        | DWC_CTLL_DST_INC
                        | DWC_CTLL_SRC_INC
                        | DWC_CTLL_FC_M2M;
        prev = first = NULL;

        for (offset = 0; offset < len; offset += xfer_count << src_width) {
                xfer_count = min_t(size_t, (len - offset) >> src_width,
                                           dwc->block_size);

                desc = dwc_desc_get(dwc);
                if (!desc)
                        goto err_desc_get;

                desc->lli.sar = src + offset;
                desc->lli.dar = dest + offset;
                desc->lli.ctllo = ctllo;
                desc->lli.ctlhi = xfer_count;
                desc->len = xfer_count << src_width;

                if (!first) {
                        first = desc;
                } else {
                        prev->lli.llp = desc->txd.phys;
                        list_add_tail(&desc->desc_node,
                                        &first->tx_list);
                }
                prev = desc;
        }

        if (flags & DMA_PREP_INTERRUPT)
                /* Trigger interrupt after last block */
                prev->lli.ctllo |= DWC_CTLL_INT_EN;

        prev->lli.llp = 0;
        first->txd.flags = flags;
        first->total_len = len;

        return &first->txd;

err_desc_get:
        dwc_desc_put(dwc, first);
        return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dma_slave_config *sconfig = &dwc->dma_sconfig;
        struct dw_desc          *prev;
        struct dw_desc          *first;
        u32                     ctllo;
        dma_addr_t              reg;
        unsigned int            reg_width;
        unsigned int            mem_width;
        unsigned int            data_width;
        unsigned int            i;
        struct scatterlist      *sg;
        size_t                  total_len = 0;

        dev_vdbg(chan2dev(chan), "%s\n", __func__);

        if (unlikely(!is_slave_direction(direction) || !sg_len))
                return NULL;

        dwc->direction = direction;

        prev = first = NULL;

        switch (direction) {
        case DMA_MEM_TO_DEV:
                reg_width = __fls(sconfig->dst_addr_width);
                reg = sconfig->dst_addr;
                ctllo = (DWC_DEFAULT_CTLLO(chan)
                                | DWC_CTLL_DST_WIDTH(reg_width)
                                | DWC_CTLL_DST_FIX
                                | DWC_CTLL_SRC_INC);

                ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
                        DWC_CTLL_FC(DW_DMA_FC_D_M2P);

                data_width = dwc_get_data_width(chan, SRC_MASTER);

                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc  *desc;
                        u32             len, dlen, mem;

                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);

                        mem_width = min_t(unsigned int,
                                          data_width, dwc_fast_fls(mem | len));

slave_sg_todev_fill_desc:
                        desc = dwc_desc_get(dwc);
                        if (!desc) {
                                dev_err(chan2dev(chan),
                                        "not enough descriptors available\n");
                                goto err_desc_get;
                        }

                        desc->lli.sar = mem;
                        desc->lli.dar = reg;
                        desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
                        if ((len >> mem_width) > dwc->block_size) {
                                dlen = dwc->block_size << mem_width;
                                mem += dlen;
                                len -= dlen;
                        } else {
                                dlen = len;
                                len = 0;
                        }

                        desc->lli.ctlhi = dlen >> mem_width;
                        desc->len = dlen;

                        if (!first) {
                                first = desc;
                        } else {
                                prev->lli.llp = desc->txd.phys;
                                list_add_tail(&desc->desc_node,
                                                &first->tx_list);
                        }
                        prev = desc;
                        total_len += dlen;

                        if (len)
                                goto slave_sg_todev_fill_desc;
                }
                break;
        case DMA_DEV_TO_MEM:
                reg_width = __fls(sconfig->src_addr_width);
                reg = sconfig->src_addr;
                ctllo = (DWC_DEFAULT_CTLLO(chan)
                                | DWC_CTLL_SRC_WIDTH(reg_width)
                                | DWC_CTLL_DST_INC
                                | DWC_CTLL_SRC_FIX);

                ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
                        DWC_CTLL_FC(DW_DMA_FC_D_P2M);

                data_width = dwc_get_data_width(chan, DST_MASTER);

                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc  *desc;
                        u32             len, dlen, mem;

                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);

                        mem_width = min_t(unsigned int,
                                          data_width, dwc_fast_fls(mem | len));

slave_sg_fromdev_fill_desc:
                        desc = dwc_desc_get(dwc);
                        if (!desc) {
                                dev_err(chan2dev(chan),
                                                "not enough descriptors available\n");
                                goto err_desc_get;
                        }

                        desc->lli.sar = reg;
                        desc->lli.dar = mem;
                        desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
                        if ((len >> reg_width) > dwc->block_size) {
                                dlen = dwc->block_size << reg_width;
                                mem += dlen;
                                len -= dlen;
                        } else {
                                dlen = len;
                                len = 0;
                        }
                        desc->lli.ctlhi = dlen >> reg_width;
                        desc->len = dlen;

                        if (!first) {
                                first = desc;
                        } else {
                                prev->lli.llp = desc->txd.phys;
                                list_add_tail(&desc->desc_node,
                                                &first->tx_list);
                        }
                        prev = desc;
                        total_len += dlen;

                        if (len)
                                goto slave_sg_fromdev_fill_desc;
                }
                break;
        default:
                return NULL;
        }

        if (flags & DMA_PREP_INTERRUPT)
                /* Trigger interrupt after last block */
                prev->lli.ctllo |= DWC_CTLL_INT_EN;

        prev->lli.llp = 0;
        first->total_len = total_len;

        return &first->txd;

err_desc_get:
        dwc_desc_put(dwc, first);
        return NULL;
}

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding least significant bit set: n & (n - 1)
 */
static inline void convert_burst(u32 *maxburst)
{
        if (*maxburst > 1)
                *maxburst = fls(*maxburst) - 2;
        else
                *maxburst = 0;
}
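
/*
 * Worked example for convert_burst() above (illustrative): *maxburst == 8
 * gives fls(8) - 2 = 4 - 2 = 2, matching the 8 -> 2 mapping in the comment;
 * a *maxburst of 0 or 1 falls through to 0, i.e. single transfers.
 */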
static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

        /* Check if chan will be configured for slave transfers */
        if (!is_slave_direction(sconfig->direction))
                return -EINVAL;

        memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
        dwc->direction = sconfig->direction;

        convert_burst(&dwc->dma_sconfig.src_maxburst);
        convert_burst(&dwc->dma_sconfig.dst_maxburst);

        return 0;
}

static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
{
        u32 cfglo = channel_readl(dwc, CFG_LO);

        channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
        while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
                cpu_relax();

        dwc->paused = true;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
        u32 cfglo = channel_readl(dwc, CFG_LO);

        channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

        dwc->paused = false;
}

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                       unsigned long arg)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(chan->device);
        struct dw_desc          *desc, *_desc;
        unsigned long           flags;
        LIST_HEAD(list);

        if (cmd == DMA_PAUSE) {
                spin_lock_irqsave(&dwc->lock, flags);

                dwc_chan_pause(dwc);

                spin_unlock_irqrestore(&dwc->lock, flags);
        } else if (cmd == DMA_RESUME) {
                if (!dwc->paused)
                        return 0;

                spin_lock_irqsave(&dwc->lock, flags);

                dwc_chan_resume(dwc);

                spin_unlock_irqrestore(&dwc->lock, flags);
        } else if (cmd == DMA_TERMINATE_ALL) {
                spin_lock_irqsave(&dwc->lock, flags);

                clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

                dwc_chan_disable(dw, dwc);

                dwc_chan_resume(dwc);

                /* active_list entries will end up before queued entries */
                list_splice_init(&dwc->queue, &list);
                list_splice_init(&dwc->active_list, &list);

                spin_unlock_irqrestore(&dwc->lock, flags);

                /* Flush all pending and queued descriptors */
                list_for_each_entry_safe(desc, _desc, &list, desc_node)
                        dwc_descriptor_complete(dwc, desc, false);
        } else if (cmd == DMA_SLAVE_CONFIG) {
                return set_runtime_config(chan, (struct dma_slave_config *)arg);
        } else {
                return -ENXIO;
        }

        return 0;
}

static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
{
        unsigned long flags;
        u32 residue;

        spin_lock_irqsave(&dwc->lock, flags);

        residue = dwc->residue;
        if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
                residue -= dwc_get_sent(dwc);

        spin_unlock_irqrestore(&dwc->lock, flags);
        return residue;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
              dma_cookie_t cookie,
              struct dma_tx_state *txstate)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        enum dma_status         ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret != DMA_SUCCESS) {
                dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

                ret = dma_cookie_status(chan, cookie, txstate);
        }

        if (ret != DMA_SUCCESS)
                dma_set_residue(txstate, dwc_get_residue(dwc));

        if (dwc->paused)
                return DMA_PAUSED;

        return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);

        if (!list_empty(&dwc->queue))
                dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(chan->device);
        struct dw_desc          *desc;
        int                     i;
        unsigned long           flags;

        dev_vdbg(chan2dev(chan), "%s\n", __func__);

        /* ASSERT:  channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
                return -EIO;
        }

        dma_cookie_init(chan);

        /*
         * NOTE: some controllers may have additional features that we
         * need to initialize here, like "scatter-gather" (which
         * doesn't mean what you think it means), and status writeback.
         */

        spin_lock_irqsave(&dwc->lock, flags);
        i = dwc->descs_allocated;
        while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
                dma_addr_t phys;

                spin_unlock_irqrestore(&dwc->lock, flags);

                desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
                if (!desc)
                        goto err_desc_alloc;

                memset(desc, 0, sizeof(struct dw_desc));

                INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.tx_submit = dwc_tx_submit;
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.phys = phys;

                dwc_desc_put(dwc, desc);

                spin_lock_irqsave(&dwc->lock, flags);
                i = ++dwc->descs_allocated;
        }

        spin_unlock_irqrestore(&dwc->lock, flags);

        dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

        return i;

err_desc_alloc:
        dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);

        return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(chan->device);
        struct dw_desc          *desc, *_desc;
        unsigned long           flags;
        LIST_HEAD(list);

        dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
                        dwc->descs_allocated);

        /* ASSERT:  channel is idle */
        BUG_ON(!list_empty(&dwc->active_list));
        BUG_ON(!list_empty(&dwc->queue));
        BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

        spin_lock_irqsave(&dwc->lock, flags);
        list_splice_init(&dwc->free_list, &list);
        dwc->descs_allocated = 0;
        dwc->initialized = false;

        /* Disable interrupts */
        channel_clear_bit(dw, MASK.XFER, dwc->mask);
        channel_clear_bit(dw, MASK.ERROR, dwc->mask);

        spin_unlock_irqrestore(&dwc->lock, flags);

        list_for_each_entry_safe(desc, _desc, &list, desc_node) {
                dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
                dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
        }

        dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

struct dw_dma_filter_args {
        struct dw_dma *dw;
        unsigned int req;
        unsigned int src;
        unsigned int dst;
};

static bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_dma *dw = to_dw_dma(chan->device);
        struct dw_dma_filter_args *fargs = param;
        struct dw_dma_slave *dws = &dwc->slave;

        /* ensure the device matches our channel */
        if (chan->device != &fargs->dw->dma)
                return false;

        dws->dma_dev    = dw->dma.dev;
        dws->cfg_hi     = ~0;
        dws->cfg_lo     = ~0;
        dws->src_master = fargs->src;
        dws->dst_master = fargs->dst;

        dwc->request_line = fargs->req;

        chan->private = dws;

        return true;
}

static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
                                         struct of_dma *ofdma)
{
        struct dw_dma *dw = ofdma->of_dma_data;
        struct dw_dma_filter_args fargs = {
                .dw = dw,
        };
        dma_cap_mask_t cap;

        if (dma_spec->args_count != 3)
                return NULL;

        fargs.req = be32_to_cpup(dma_spec->args+0);
        fargs.src = be32_to_cpup(dma_spec->args+1);
        fargs.dst = be32_to_cpup(dma_spec->args+2);

        if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
                    fargs.src >= dw->nr_masters ||
                    fargs.dst >= dw->nr_masters))
                return NULL;

        dma_cap_zero(cap);
        dma_cap_set(DMA_SLAVE, cap);

        /* TODO: there should be a simpler way to do this */
        return dma_request_channel(cap, dw_dma_generic_filter, &fargs);
}
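
/*
 * Sketch of a consumer node for the xlate above; the three specifier cells
 * are, in order, the request line and the source and destination masters.
 * The node name and cell values here are hypothetical:
 *
 *      serial@d0000000 {
 *              ...
 *              dmas = <&dmahost 12 0 1>, <&dmahost 13 1 0>;
 *              dma-names = "rx", "tx";
 *      };
 */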
/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
        unsigned long           flags;

        if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
                dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
                return -ENODEV;
        }

        spin_lock_irqsave(&dwc->lock, flags);

        /* assert channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_err(chan2dev(&dwc->chan),
                        "BUG: Attempted to start non-idle channel\n");
                dwc_dump_chan_regs(dwc);
                spin_unlock_irqrestore(&dwc->lock, flags);
                return -EBUSY;
        }

        dma_writel(dw, CLEAR.ERROR, dwc->mask);
        dma_writel(dw, CLEAR.XFER, dwc->mask);

        /* setup DMAC channel registers */
        channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
        channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
        channel_writel(dwc, CTL_HI, 0);

        channel_set_bit(dw, CH_EN, dwc->mask);

        spin_unlock_irqrestore(&dwc->lock, flags);

        return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
        unsigned long           flags;

        spin_lock_irqsave(&dwc->lock, flags);

        dwc_chan_disable(dw, dwc);

        spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                dma_addr_t buf_addr, size_t buf_len, size_t period_len,
                enum dma_transfer_direction direction)
{
        struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
        struct dma_slave_config         *sconfig = &dwc->dma_sconfig;
        struct dw_cyclic_desc           *cdesc;
        struct dw_cyclic_desc           *retval = NULL;
        struct dw_desc                  *desc;
        struct dw_desc                  *last = NULL;
        unsigned long                   was_cyclic;
        unsigned int                    reg_width;
        unsigned int                    periods;
        unsigned int                    i;
        unsigned long                   flags;

        spin_lock_irqsave(&dwc->lock, flags);
        if (dwc->nollp) {
                spin_unlock_irqrestore(&dwc->lock, flags);
                dev_dbg(chan2dev(&dwc->chan),
                                "channel doesn't support LLP transfers\n");
                return ERR_PTR(-EINVAL);
        }

        if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
                spin_unlock_irqrestore(&dwc->lock, flags);
                dev_dbg(chan2dev(&dwc->chan),
                                "queue and/or active list are not empty\n");
                return ERR_PTR(-EBUSY);
        }

        was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
        spin_unlock_irqrestore(&dwc->lock, flags);
        if (was_cyclic) {
                dev_dbg(chan2dev(&dwc->chan),
                                "channel already prepared for cyclic DMA\n");
                return ERR_PTR(-EBUSY);
        }

        retval = ERR_PTR(-EINVAL);

        if (unlikely(!is_slave_direction(direction)))
                goto out_err;

        dwc->direction = direction;

        if (direction == DMA_MEM_TO_DEV)
                reg_width = __ffs(sconfig->dst_addr_width);
        else
                reg_width = __ffs(sconfig->src_addr_width);

        periods = buf_len / period_len;

        /* Check for too big/unaligned periods and unaligned DMA buffer. */
        if (period_len > (dwc->block_size << reg_width))
                goto out_err;
        if (unlikely(period_len & ((1 << reg_width) - 1)))
                goto out_err;
        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
                goto out_err;

        retval = ERR_PTR(-ENOMEM);

        if (periods > NR_DESCS_PER_CHANNEL)
                goto out_err;

        cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
        if (!cdesc)
                goto out_err;

        cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
        if (!cdesc->desc)
                goto out_err_alloc;

        for (i = 0; i < periods; i++) {
                desc = dwc_desc_get(dwc);
                if (!desc)
                        goto out_err_desc_get;

                switch (direction) {
                case DMA_MEM_TO_DEV:
                        desc->lli.dar = sconfig->dst_addr;
                        desc->lli.sar = buf_addr + (period_len * i);
                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
                                        | DWC_CTLL_DST_WIDTH(reg_width)
                                        | DWC_CTLL_SRC_WIDTH(reg_width)
                                        | DWC_CTLL_DST_FIX
                                        | DWC_CTLL_SRC_INC
                                        | DWC_CTLL_INT_EN);

                        desc->lli.ctllo |= sconfig->device_fc ?
                                DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
                                DWC_CTLL_FC(DW_DMA_FC_D_M2P);

                        break;
                case DMA_DEV_TO_MEM:
                        desc->lli.dar = buf_addr + (period_len * i);
                        desc->lli.sar = sconfig->src_addr;
                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
                                        | DWC_CTLL_SRC_WIDTH(reg_width)
                                        | DWC_CTLL_DST_WIDTH(reg_width)
                                        | DWC_CTLL_DST_INC
                                        | DWC_CTLL_SRC_FIX
                                        | DWC_CTLL_INT_EN);

                        desc->lli.ctllo |= sconfig->device_fc ?
                                DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
                                DWC_CTLL_FC(DW_DMA_FC_D_P2M);

                        break;
                default:
                        break;
                }

                desc->lli.ctlhi = (period_len >> reg_width);
                cdesc->desc[i] = desc;

                if (last)
                        last->lli.llp = desc->txd.phys;

                last = desc;
        }

        /* lets make a cyclic list */
        last->lli.llp = cdesc->desc[0]->txd.phys;

        dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
                        "period %zu periods %d\n", (unsigned long long)buf_addr,
                        buf_len, period_len, periods);

        cdesc->periods = periods;
        dwc->cdesc = cdesc;

        return cdesc;

out_err_desc_get:
        while (i--)
                dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
        kfree(cdesc);
out_err:
        clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
        return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
        struct dw_cyclic_desc   *cdesc = dwc->cdesc;
        int                     i;
        unsigned long           flags;

        dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

        if (!cdesc)
                return;

        spin_lock_irqsave(&dwc->lock, flags);

        dwc_chan_disable(dw, dwc);

        dma_writel(dw, CLEAR.ERROR, dwc->mask);
        dma_writel(dw, CLEAR.XFER, dwc->mask);

        spin_unlock_irqrestore(&dwc->lock, flags);

        for (i = 0; i < cdesc->periods; i++)
                dwc_desc_put(dwc, cdesc->desc[i]);

        kfree(cdesc->desc);
        kfree(cdesc);

        clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
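
/*
 * Putting the cyclic API above together: a minimal usage sketch, not taken
 * from an in-tree client; my_period_callback and my_data are hypothetical
 * names and error handling is elided.
 *
 *      struct dw_cyclic_desc *cdesc;
 *
 *      dmaengine_slave_config(chan, &config);
 *      cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, period_len,
 *                                 DMA_MEM_TO_DEV);
 *      if (IS_ERR(cdesc))
 *              return PTR_ERR(cdesc);
 *      cdesc->period_callback = my_period_callback;
 *      cdesc->period_callback_param = my_data;
 *      dw_dma_cyclic_start(chan);
 *      ...
 *      dw_dma_cyclic_stop(chan);
 *      dw_dma_cyclic_free(chan);
 */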
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
        int i;

        dma_writel(dw, CFG, 0);

        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

        while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
                cpu_relax();

        for (i = 0; i < dw->dma.chancnt; i++)
                dw->chan[i].initialized = false;
}

#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct dw_dma_platform_data *pdata;
        u32 tmp, arr[4];

        if (!np) {
                dev_err(&pdev->dev, "Missing DT data\n");
                return NULL;
        }

        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return NULL;

        if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
                return NULL;

        if (of_property_read_bool(np, "is_private"))
                pdata->is_private = true;

        if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
                pdata->chan_allocation_order = (unsigned char)tmp;

        if (!of_property_read_u32(np, "chan_priority", &tmp))
                pdata->chan_priority = tmp;

        if (!of_property_read_u32(np, "block_size", &tmp))
                pdata->block_size = tmp;

        if (!of_property_read_u32(np, "dma-masters", &tmp)) {
                if (tmp > 4)
                        return NULL;

                pdata->nr_masters = tmp;
        }

        if (!of_property_read_u32_array(np, "data_width", arr,
                                pdata->nr_masters))
                for (tmp = 0; tmp < pdata->nr_masters; tmp++)
                        pdata->data_width[tmp] = arr[tmp];

        return pdata;
}
*
1623 dw_dma_parse_dt(struct platform_device
*pdev
)
1629 static int dw_probe(struct platform_device
*pdev
)
1631 struct dw_dma_platform_data
*pdata
;
1632 struct resource
*io
;
1637 unsigned int dw_params
;
1638 unsigned int nr_channels
;
1639 unsigned int max_blk_size
= 0;
1644 io
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1648 irq
= platform_get_irq(pdev
, 0);
1652 regs
= devm_ioremap_resource(&pdev
->dev
, io
);
1654 return PTR_ERR(regs
);
1656 /* Apply default dma_mask if needed */
1657 if (!pdev
->dev
.dma_mask
) {
1658 pdev
->dev
.dma_mask
= &pdev
->dev
.coherent_dma_mask
;
1659 pdev
->dev
.coherent_dma_mask
= DMA_BIT_MASK(32);
1662 dw_params
= dma_read_byaddr(regs
, DW_PARAMS
);
1663 autocfg
= dw_params
>> DW_PARAMS_EN
& 0x1;
1665 dev_dbg(&pdev
->dev
, "DW_PARAMS: 0x%08x\n", dw_params
);
1667 pdata
= dev_get_platdata(&pdev
->dev
);
1669 pdata
= dw_dma_parse_dt(pdev
);
1671 if (!pdata
&& autocfg
) {
1672 pdata
= devm_kzalloc(&pdev
->dev
, sizeof(*pdata
), GFP_KERNEL
);
1676 /* Fill platform data with the default values */
1677 pdata
->is_private
= true;
1678 pdata
->chan_allocation_order
= CHAN_ALLOCATION_ASCENDING
;
1679 pdata
->chan_priority
= CHAN_PRIORITY_ASCENDING
;
1680 } else if (!pdata
|| pdata
->nr_channels
> DW_DMA_MAX_NR_CHANNELS
)
1684 nr_channels
= (dw_params
>> DW_PARAMS_NR_CHAN
& 0x7) + 1;
1686 nr_channels
= pdata
->nr_channels
;
1688 size
= sizeof(struct dw_dma
) + nr_channels
* sizeof(struct dw_dma_chan
);
1689 dw
= devm_kzalloc(&pdev
->dev
, size
, GFP_KERNEL
);
1693 dw
->clk
= devm_clk_get(&pdev
->dev
, "hclk");
1694 if (IS_ERR(dw
->clk
))
1695 return PTR_ERR(dw
->clk
);
1696 clk_prepare_enable(dw
->clk
);
1700 /* get hardware configuration parameters */
1702 max_blk_size
= dma_readl(dw
, MAX_BLK_SIZE
);
1704 dw
->nr_masters
= (dw_params
>> DW_PARAMS_NR_MASTER
& 3) + 1;
1705 for (i
= 0; i
< dw
->nr_masters
; i
++) {
1707 (dw_params
>> DW_PARAMS_DATA_WIDTH(i
) & 3) + 2;
1710 dw
->nr_masters
= pdata
->nr_masters
;
1711 memcpy(dw
->data_width
, pdata
->data_width
, 4);
1714 /* Calculate all channel mask before DMA setup */
1715 dw
->all_chan_mask
= (1 << nr_channels
) - 1;
1717 /* force dma off, just in case */
1720 /* disable BLOCK interrupts as well */
1721 channel_clear_bit(dw
, MASK
.BLOCK
, dw
->all_chan_mask
);
1723 err
= devm_request_irq(&pdev
->dev
, irq
, dw_dma_interrupt
, 0,
1728 platform_set_drvdata(pdev
, dw
);
1730 /* create a pool of consistent memory blocks for hardware descriptors */
1731 dw
->desc_pool
= dmam_pool_create("dw_dmac_desc_pool", &pdev
->dev
,
1732 sizeof(struct dw_desc
), 4, 0);
1733 if (!dw
->desc_pool
) {
1734 dev_err(&pdev
->dev
, "No memory for descriptors dma pool\n");
1738 tasklet_init(&dw
->tasklet
, dw_dma_tasklet
, (unsigned long)dw
);
1740 INIT_LIST_HEAD(&dw
->dma
.channels
);
1741 for (i
= 0; i
< nr_channels
; i
++) {
1742 struct dw_dma_chan
*dwc
= &dw
->chan
[i
];
1743 int r
= nr_channels
- i
- 1;
1745 dwc
->chan
.device
= &dw
->dma
;
1746 dma_cookie_init(&dwc
->chan
);
1747 if (pdata
->chan_allocation_order
== CHAN_ALLOCATION_ASCENDING
)
1748 list_add_tail(&dwc
->chan
.device_node
,
1751 list_add(&dwc
->chan
.device_node
, &dw
->dma
.channels
);
1753 /* 7 is highest priority & 0 is lowest. */
1754 if (pdata
->chan_priority
== CHAN_PRIORITY_ASCENDING
)
1759 dwc
->ch_regs
= &__dw_regs(dw
)->CHAN
[i
];
1760 spin_lock_init(&dwc
->lock
);
1763 INIT_LIST_HEAD(&dwc
->active_list
);
1764 INIT_LIST_HEAD(&dwc
->queue
);
1765 INIT_LIST_HEAD(&dwc
->free_list
);
1767 channel_clear_bit(dw
, CH_EN
, dwc
->mask
);
1769 dwc
->direction
= DMA_TRANS_NONE
;
1771 /* hardware configuration */
1773 unsigned int dwc_params
;
1775 dwc_params
= dma_read_byaddr(regs
+ r
* sizeof(u32
),
1778 dev_dbg(&pdev
->dev
, "DWC_PARAMS[%d]: 0x%08x\n", i
,
1781 /* Decode maximum block size for given channel. The
1782 * stored 4 bit value represents blocks from 0x00 for 3
1783 * up to 0x0a for 4095. */
1785 (4 << ((max_blk_size
>> 4 * i
) & 0xf)) - 1;
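                        /*
                         * Worked example (illustrative): an encoded nibble of
                         * 0x0a yields (4 << 0x0a) - 1 = 4095, the documented
                         * maximum, while 0x00 yields (4 << 0) - 1 = 3.
                         */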
                        dwc->nollp =
                                (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
                } else {
                        dwc->block_size = pdata->block_size;

                        /* Check if channel supports multi block transfer */
                        channel_writel(dwc, LLP, 0xfffffffc);
                        dwc->nollp =
                                (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
                        channel_writel(dwc, LLP, 0);
                }
        }

        /* Clear all interrupts on all channels. */
        dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
        dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
        dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
        dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
        dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

        dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
        dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
        if (pdata->is_private)
                dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
        dw->dma.dev = &pdev->dev;
        dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
        dw->dma.device_free_chan_resources = dwc_free_chan_resources;

        dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

        dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
        dw->dma.device_control = dwc_control;

        dw->dma.device_tx_status = dwc_tx_status;
        dw->dma.device_issue_pending = dwc_issue_pending;

        dma_writel(dw, CFG, DW_CFG_DMA_EN);

        dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
                 nr_channels);

        dma_async_device_register(&dw->dma);

        if (pdev->dev.of_node) {
                err = of_dma_controller_register(pdev->dev.of_node,
                                                 dw_dma_xlate, dw);
                if (err && err != -ENODEV)
                        dev_err(&pdev->dev,
                                "could not register of_dma_controller\n");
        }

        return 0;
}

static int dw_remove(struct platform_device *pdev)
{
        struct dw_dma           *dw = platform_get_drvdata(pdev);
        struct dw_dma_chan      *dwc, *_dwc;

        if (pdev->dev.of_node)
                of_dma_controller_free(pdev->dev.of_node);
        dw_dma_off(dw);
        dma_async_device_unregister(&dw->dma);

        tasklet_kill(&dw->tasklet);

        list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
                        chan.device_node) {
                list_del(&dwc->chan.device_node);
                channel_clear_bit(dw, CH_EN, dwc->mask);
        }

        return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
        struct dw_dma *dw = platform_get_drvdata(pdev);

        dw_dma_off(dw);
        clk_disable_unprepare(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct dw_dma   *dw = platform_get_drvdata(pdev);

        dw_dma_off(dw);
        clk_disable_unprepare(dw->clk);

        return 0;
}

static int dw_resume_noirq(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct dw_dma   *dw = platform_get_drvdata(pdev);

        clk_prepare_enable(dw->clk);
        dma_writel(dw, CFG, DW_CFG_DMA_EN);

        return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
        .suspend_noirq = dw_suspend_noirq,
        .resume_noirq = dw_resume_noirq,
        .freeze_noirq = dw_suspend_noirq,
        .thaw_noirq = dw_resume_noirq,
        .restore_noirq = dw_resume_noirq,
        .poweroff_noirq = dw_suspend_noirq,
};

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_id_table[] = {
        { .compatible = "snps,dma-spear1340" },
        {}
};
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif

static const struct platform_device_id dw_dma_ids[] = {
        { "INTL9C60", 0 },
        { }
};

static struct platform_driver dw_driver = {
        .probe          = dw_probe,
        .remove         = dw_remove,
        .shutdown       = dw_shutdown,
        .driver = {
                .name   = "dw_dmac",
                .pm     = &dw_dev_pm_ops,
                .of_match_table = of_match_ptr(dw_dma_id_table),
        },
        .id_table       = dw_dma_ids,
};

static int __init dw_init(void)
{
        return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
        platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");