/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */
static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
{
        return slave ? slave->dst_master : 0;
}

static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
{
        return slave ? slave->src_master : 1;
}
#define DWC_DEFAULT_CTLLO(_chan) ({                                    \
                struct dw_dma_slave *__slave = (_chan->private);       \
                struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);      \
                struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
                bool _is_slave = is_slave_direction(_dwc->direction);  \
                int _dms = dwc_get_dms(__slave);                       \
                int _sms = dwc_get_sms(__slave);                       \
                u8 _smsize = _is_slave ? _sconfig->src_maxburst :      \
                        DW_DMA_MSIZE_16;                               \
                u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :      \
                        DW_DMA_MSIZE_16;                               \
                                                                       \
                (DWC_CTLL_DST_MSIZE(_dmsize)                           \
                 | DWC_CTLL_SRC_MSIZE(_smsize)                         \
                 | DWC_CTLL_LLP_D_EN                                   \
                 | DWC_CTLL_LLP_S_EN                                   \
                 | DWC_CTLL_DMS(_dms)                                  \
                 | DWC_CTLL_SMS(_sms));                                \
        })
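/*
 * Non-slave (memory-to-memory) transfers have no dma_slave_config to
 * consult, so DWC_DEFAULT_CTLLO() falls back to DW_DMA_MSIZE_16 for both
 * burst sizes; only the DMS/SMS master-select fields then depend on the
 * channel's attached dw_dma_slave data.
 */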
/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL    64
static inline unsigned int dwc_get_data_width(struct dma_chan *chan,
                                              int master)
{
        struct dw_dma *dw = to_dw_dma(chan->device);
        struct dw_dma_slave *dws = chan->private;

        if (master == SRC_MASTER)
                return dw->data_width[dwc_get_sms(dws)];
        else if (master == DST_MASTER)
                return dw->data_width[dwc_get_dms(dws)];

        return 0;
}
/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
        return chan->dev->device.parent;
}
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
        return to_dw_desc(dwc->active_list.next);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
        struct dw_desc *desc, *_desc;
        struct dw_desc *ret = NULL;
        unsigned int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&dwc->lock, flags);
        list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
        }
        spin_unlock_irqrestore(&dwc->lock, flags);

        dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

        return ret;
}
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
        unsigned long flags;

        if (desc) {
                struct dw_desc *child;

                spin_lock_irqsave(&dwc->lock, flags);
                list_for_each_entry(child, &desc->tx_list, desc_node)
                        dev_vdbg(chan2dev(&dwc->chan),
                                        "moving child desc %p to freelist\n",
                                        child);
                list_splice_init(&desc->tx_list, &dwc->free_list);
                dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &dwc->free_list);
                spin_unlock_irqrestore(&dwc->lock, flags);
        }
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
        struct dw_dma_slave *dws = dwc->chan.private;
        u32 cfghi = DWC_CFGH_FIFO_MODE;
        u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

        if (dwc->initialized == true)
                return;

        if (dws) {
                /*
                 * We need controller-specific data to set up slave
                 * transfers.
                 */
                BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

                cfghi = dws->cfg_hi;
                cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
        } else {
                if (dwc->direction == DMA_MEM_TO_DEV)
                        cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
                else if (dwc->direction == DMA_DEV_TO_MEM)
                        cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
        }

        channel_writel(dwc, CFG_LO, cfglo);
        channel_writel(dwc, CFG_HI, cfghi);

        /* Enable interrupts */
        channel_set_bit(dw, MASK.XFER, dwc->mask);
        channel_set_bit(dw, MASK.ERROR, dwc->mask);

        dwc->initialized = true;
}
/*----------------------------------------------------------------------*/

static inline unsigned int dwc_fast_fls(unsigned long long v)
{
        /*
         * We can be a lot more clever here, but this should take care
         * of the most common optimization.
         */
        if (!(v & 7))
                return 3;
        else if (!(v & 3))
                return 2;
        else if (!(v & 1))
                return 1;
        return 0;
}
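/*
 * Example: if src | dst | len has its low three bits clear (e.g. 0x1008),
 * dwc_fast_fls() returns 3, allowing an 8-byte (1 << 3) transfer width;
 * a fully unaligned value returns 0, i.e. single-byte accesses.
 */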
static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
        dev_err(chan2dev(&dwc->chan),
                " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
                channel_readl(dwc, SAR),
                channel_readl(dwc, DAR),
                channel_readl(dwc, LLP),
                channel_readl(dwc, CTL_HI),
                channel_readl(dwc, CTL_LO));
}
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
        channel_clear_bit(dw, CH_EN, dwc->mask);
        while (dma_readl(dw, CH_EN) & dwc->mask)
                cpu_relax();
}

/*----------------------------------------------------------------------*/
/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
                                       struct dw_desc *desc)
{
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
        u32 ctllo;

        /* Software emulation of LLP mode relies on interrupts to continue
         * multi block transfer. */
        ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

        channel_writel(dwc, SAR, desc->lli.sar);
        channel_writel(dwc, DAR, desc->lli.dar);
        channel_writel(dwc, CTL_LO, ctllo);
        channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
        channel_set_bit(dw, CH_EN, dwc->mask);

        /* Move pointer to next descriptor */
        dwc->tx_node_active = dwc->tx_node_active->next;
}
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
        struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
        unsigned long   was_soft_llp;

        /* ASSERT: channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_err(chan2dev(&dwc->chan),
                        "BUG: Attempted to start non-idle channel\n");
                dwc_dump_chan_regs(dwc);

                /* The tasklet will hopefully advance the queue... */
                return;
        }

        if (dwc->nollp) {
                was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
                                                &dwc->flags);
                if (was_soft_llp) {
                        dev_err(chan2dev(&dwc->chan),
                                "BUG: Attempted to start new LLP transfer "
                                "inside ongoing one\n");
                        return;
                }

                dwc_initialize(dwc);

                dwc->tx_list = &first->tx_list;
                dwc->tx_node_active = &first->tx_list;

                dwc_do_single_block(dwc, first);

                return;
        }

        dwc_initialize(dwc);

        channel_writel(dwc, LLP, first->txd.phys);
        channel_writel(dwc, CTL_LO,
                        DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
        channel_writel(dwc, CTL_HI, 0);
        channel_set_bit(dw, CH_EN, dwc->mask);
}
/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
                bool callback_required)
{
        dma_async_tx_callback   callback = NULL;
        void                    *param = NULL;
        struct dma_async_tx_descriptor  *txd = &desc->txd;
        struct dw_desc          *child;
        unsigned long           flags;

        dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

        spin_lock_irqsave(&dwc->lock, flags);
        dma_cookie_complete(txd);
        if (callback_required) {
                callback = txd->callback;
                param = txd->callback_param;
        }

        /* async_tx_ack */
        list_for_each_entry(child, &desc->tx_list, desc_node)
                async_tx_ack(&child->txd);
        async_tx_ack(&desc->txd);

        list_splice_init(&desc->tx_list, &dwc->free_list);
        list_move(&desc->desc_node, &dwc->free_list);

        if (!is_slave_direction(dwc->direction)) {
                struct device *parent = chan2parent(&dwc->chan);
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                        if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
                                dma_unmap_single(parent, desc->lli.dar,
                                                desc->len, DMA_FROM_DEVICE);
                        else
                                dma_unmap_page(parent, desc->lli.dar,
                                                desc->len, DMA_FROM_DEVICE);
                }
                if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                        if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
                                dma_unmap_single(parent, desc->lli.sar,
                                                desc->len, DMA_TO_DEVICE);
                        else
                                dma_unmap_page(parent, desc->lli.sar,
                                                desc->len, DMA_TO_DEVICE);
                }
        }

        spin_unlock_irqrestore(&dwc->lock, flags);

        if (callback)
                callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
        struct dw_desc *desc, *_desc;
        LIST_HEAD(list);
        unsigned long flags;

        spin_lock_irqsave(&dwc->lock, flags);
        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_err(chan2dev(&dwc->chan),
                        "BUG: XFER bit set, but channel not idle!\n");

                /* Try to continue after resetting the channel... */
                dwc_chan_disable(dw, dwc);
        }

        /*
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
         */
        list_splice_init(&dwc->active_list, &list);
        if (!list_empty(&dwc->queue)) {
                list_move(dwc->queue.next, &dwc->active_list);
                dwc_dostart(dwc, dwc_first_active(dwc));
        }

        spin_unlock_irqrestore(&dwc->lock, flags);

        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                dwc_descriptor_complete(dwc, desc, true);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
        dma_addr_t llp;
        struct dw_desc *desc, *_desc;
        struct dw_desc *child;
        u32 status_xfer;
        unsigned long flags;

        spin_lock_irqsave(&dwc->lock, flags);
        llp = channel_readl(dwc, LLP);
        status_xfer = dma_readl(dw, RAW.XFER);

        if (status_xfer & dwc->mask) {
                /* Everything we've submitted is done */
                dma_writel(dw, CLEAR.XFER, dwc->mask);
                spin_unlock_irqrestore(&dwc->lock, flags);

                dwc_complete_all(dw, dwc);
                return;
        }

        if (list_empty(&dwc->active_list)) {
                spin_unlock_irqrestore(&dwc->lock, flags);
                return;
        }

        dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
                        (unsigned long long)llp);

        list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
                /* check first descriptors addr */
                if (desc->txd.phys == llp) {
                        spin_unlock_irqrestore(&dwc->lock, flags);
                        return;
                }

                /* check first descriptors llp */
                if (desc->lli.llp == llp) {
                        /* This one is currently in progress */
                        spin_unlock_irqrestore(&dwc->lock, flags);
                        return;
                }

                list_for_each_entry(child, &desc->tx_list, desc_node)
                        if (child->lli.llp == llp) {
                                /* Currently in progress */
                                spin_unlock_irqrestore(&dwc->lock, flags);
                                return;
                        }

                /*
                 * No descriptors so far seem to be in progress, i.e.
                 * this one must be done.
                 */
                spin_unlock_irqrestore(&dwc->lock, flags);
                dwc_descriptor_complete(dwc, desc, true);
                spin_lock_irqsave(&dwc->lock, flags);
        }

        dev_err(chan2dev(&dwc->chan),
                "BUG: All descriptors done, but channel not idle!\n");

        /* Try to continue after resetting the channel... */
        dwc_chan_disable(dw, dwc);

        if (!list_empty(&dwc->queue)) {
                list_move(dwc->queue.next, &dwc->active_list);
                dwc_dostart(dwc, dwc_first_active(dwc));
        }
        spin_unlock_irqrestore(&dwc->lock, flags);
}
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
        dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
                 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
        struct dw_desc *bad_desc;
        struct dw_desc *child;
        unsigned long flags;

        dwc_scan_descriptors(dw, dwc);

        spin_lock_irqsave(&dwc->lock, flags);

        /*
         * The descriptor currently at the head of the active list is
         * borked. Since we don't have any way to report errors, we'll
         * just have to scream loudly and try to carry on.
         */
        bad_desc = dwc_first_active(dwc);
        list_del_init(&bad_desc->desc_node);
        list_move(dwc->queue.next, dwc->active_list.prev);

        /* Clear the error flag and try to restart the controller */
        dma_writel(dw, CLEAR.ERROR, dwc->mask);
        if (!list_empty(&dwc->active_list))
                dwc_dostart(dwc, dwc_first_active(dwc));

        /*
         * WARN may seem harsh, but since this only happens
         * when someone submits a bad physical address in a
         * descriptor, we should consider ourselves lucky that the
         * controller flagged an error instead of scribbling over
         * random memory locations.
         */
        dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
                                       " cookie: %d\n", bad_desc->txd.cookie);
        dwc_dump_lli(dwc, &bad_desc->lli);
        list_for_each_entry(child, &bad_desc->tx_list, desc_node)
                dwc_dump_lli(dwc, &child->lli);

        spin_unlock_irqrestore(&dwc->lock, flags);

        /* Pretend the descriptor completed successfully */
        dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
                u32 status_err, u32 status_xfer)
{
        unsigned long flags;

        if (dwc->mask & status_xfer) {
                void (*callback)(void *param);
                void *callback_param;

                dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
                                channel_readl(dwc, LLP));

                callback = dwc->cdesc->period_callback;
                callback_param = dwc->cdesc->period_callback_param;

                if (callback)
                        callback(callback_param);
        }

        /*
         * Error and transfer complete are highly unlikely, and will most
         * likely be due to a configuration error by the user.
         */
        if (unlikely(status_err & dwc->mask) ||
                        unlikely(status_xfer & dwc->mask)) {
                int i;

                dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
                                "interrupt, stopping DMA transfer\n",
                                status_xfer ? "xfer" : "error");

                spin_lock_irqsave(&dwc->lock, flags);

                dwc_dump_chan_regs(dwc);

                dwc_chan_disable(dw, dwc);

                /* make sure DMA does not restart by loading a new list */
                channel_writel(dwc, LLP, 0);
                channel_writel(dwc, CTL_LO, 0);
                channel_writel(dwc, CTL_HI, 0);

                dma_writel(dw, CLEAR.ERROR, dwc->mask);
                dma_writel(dw, CLEAR.XFER, dwc->mask);

                for (i = 0; i < dwc->cdesc->periods; i++)
                        dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

                spin_unlock_irqrestore(&dwc->lock, flags);
        }
}
/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
        struct dw_dma *dw = (struct dw_dma *)data;
        struct dw_dma_chan *dwc;
        u32 status_xfer;
        u32 status_err;
        int i;

        status_xfer = dma_readl(dw, RAW.XFER);
        status_err = dma_readl(dw, RAW.ERROR);

        dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

        for (i = 0; i < dw->dma.chancnt; i++) {
                dwc = &dw->chan[i];
                if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
                        dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
                else if (status_err & (1 << i))
                        dwc_handle_error(dw, dwc);
                else if (status_xfer & (1 << i)) {
                        unsigned long flags;

                        spin_lock_irqsave(&dwc->lock, flags);
                        if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
                                if (dwc->tx_node_active != dwc->tx_list) {
                                        struct dw_desc *desc =
                                                to_dw_desc(dwc->tx_node_active);

                                        dma_writel(dw, CLEAR.XFER, dwc->mask);

                                        dwc_do_single_block(dwc, desc);

                                        spin_unlock_irqrestore(&dwc->lock, flags);
                                        continue;
                                }

                                /* we are done here */
                                clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
                        }
                        spin_unlock_irqrestore(&dwc->lock, flags);

                        dwc_scan_descriptors(dw, dwc);
                }
        }

        /*
         * Re-enable interrupts.
         */
        channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
        channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
        struct dw_dma *dw = dev_id;
        u32 status;

        dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
                        dma_readl(dw, STATUS_INT));

        /*
         * Just disable the interrupts. We'll turn them back on in the
         * softirq handler.
         */
        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

        status = dma_readl(dw, STATUS_INT);
        if (status) {
                dev_err(dw->dma.dev,
                        "BUG: Unexpected interrupts pending: 0x%x\n",
                        status);

                /* Try to recover */
                channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
        }

        tasklet_schedule(&dw->tasklet);

        return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dw_desc          *desc = txd_to_dw_desc(tx);
        struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
        dma_cookie_t            cookie;
        unsigned long           flags;

        spin_lock_irqsave(&dwc->lock, flags);
        cookie = dma_cookie_assign(tx);

        /*
         * REVISIT: We should attempt to chain as many descriptors as
         * possible, perhaps even appending to those already submitted
         * for DMA. But this is hard to do in a race-free manner.
         */
        if (list_empty(&dwc->active_list)) {
                dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
                                desc->txd.cookie);
                list_add_tail(&desc->desc_node, &dwc->active_list);
                dwc_dostart(dwc, dwc_first_active(dwc));
        } else {
                dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
                                desc->txd.cookie);

                list_add_tail(&desc->desc_node, &dwc->queue);
        }

        spin_unlock_irqrestore(&dwc->lock, flags);

        return cookie;
}
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_desc          *desc;
        struct dw_desc          *first;
        struct dw_desc          *prev;
        size_t                  xfer_count;
        size_t                  offset;
        unsigned int            src_width;
        unsigned int            dst_width;
        unsigned int            data_width;
        u32                     ctllo;

        dev_vdbg(chan2dev(chan),
                        "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
                        (unsigned long long)dest, (unsigned long long)src,
                        len, flags);

        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
                return NULL;
        }

        dwc->direction = DMA_MEM_TO_MEM;

        data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
                           dwc_get_data_width(chan, DST_MASTER));

        src_width = dst_width = min_t(unsigned int, data_width,
                                      dwc_fast_fls(src | dest | len));

        ctllo = DWC_DEFAULT_CTLLO(chan)
                        | DWC_CTLL_DST_WIDTH(dst_width)
                        | DWC_CTLL_SRC_WIDTH(src_width)
                        | DWC_CTLL_DST_INC
                        | DWC_CTLL_SRC_INC
                        | DWC_CTLL_FC_M2M;
        prev = first = NULL;

        for (offset = 0; offset < len; offset += xfer_count << src_width) {
                xfer_count = min_t(size_t, (len - offset) >> src_width,
                                           dwc->block_size);

                desc = dwc_desc_get(dwc);
                if (!desc)
                        goto err_desc_get;

                desc->lli.sar = src + offset;
                desc->lli.dar = dest + offset;
                desc->lli.ctllo = ctllo;
                desc->lli.ctlhi = xfer_count;

                if (!first) {
                        first = desc;
                } else {
                        prev->lli.llp = desc->txd.phys;
                        list_add_tail(&desc->desc_node,
                                        &first->tx_list);
                }
                prev = desc;
        }

        if (flags & DMA_PREP_INTERRUPT)
                /* Trigger interrupt after last block */
                prev->lli.ctllo |= DWC_CTLL_INT_EN;

        prev->lli.llp = 0;
        first->txd.flags = flags;
        first->len = len;

        return &first->txd;

err_desc_get:
        dwc_desc_put(dwc, first);
        return NULL;
}
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dma_slave_config *sconfig = &dwc->dma_sconfig;
        struct dw_desc          *prev;
        struct dw_desc          *first;
        u32                     ctllo;
        dma_addr_t              reg;
        unsigned int            reg_width;
        unsigned int            mem_width;
        unsigned int            data_width;
        unsigned int            i;
        struct scatterlist      *sg;
        size_t                  total_len = 0;

        dev_vdbg(chan2dev(chan), "%s\n", __func__);

        if (unlikely(!is_slave_direction(direction) || !sg_len))
                return NULL;

        dwc->direction = direction;

        prev = first = NULL;

        switch (direction) {
        case DMA_MEM_TO_DEV:
                reg_width = __fls(sconfig->dst_addr_width);
                reg = sconfig->dst_addr;
                ctllo = (DWC_DEFAULT_CTLLO(chan)
                                | DWC_CTLL_DST_WIDTH(reg_width)
                                | DWC_CTLL_DST_FIX
                                | DWC_CTLL_SRC_INC);

                ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
                        DWC_CTLL_FC(DW_DMA_FC_D_M2P);

                data_width = dwc_get_data_width(chan, SRC_MASTER);

                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc  *desc;
                        u32             len, dlen, mem;

                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);

                        mem_width = min_t(unsigned int,
                                          data_width, dwc_fast_fls(mem | len));

slave_sg_todev_fill_desc:
                        desc = dwc_desc_get(dwc);
                        if (!desc) {
                                dev_err(chan2dev(chan),
                                        "not enough descriptors available\n");
                                goto err_desc_get;
                        }

                        desc->lli.sar = mem;
                        desc->lli.dar = reg;
                        desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
                        if ((len >> mem_width) > dwc->block_size) {
                                dlen = dwc->block_size << mem_width;
                                mem += dlen;
                                len -= dlen;
                        } else {
                                dlen = len;
                                len = 0;
                        }

                        desc->lli.ctlhi = dlen >> mem_width;

                        if (!first) {
                                first = desc;
                        } else {
                                prev->lli.llp = desc->txd.phys;
                                list_add_tail(&desc->desc_node,
                                                &first->tx_list);
                        }
                        prev = desc;
                        total_len += dlen;

                        if (len)
                                goto slave_sg_todev_fill_desc;
                }
                break;
        case DMA_DEV_TO_MEM:
                reg_width = __fls(sconfig->src_addr_width);
                reg = sconfig->src_addr;
                ctllo = (DWC_DEFAULT_CTLLO(chan)
                                | DWC_CTLL_SRC_WIDTH(reg_width)
                                | DWC_CTLL_DST_INC
                                | DWC_CTLL_SRC_FIX);

                ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
                        DWC_CTLL_FC(DW_DMA_FC_D_P2M);

                data_width = dwc_get_data_width(chan, DST_MASTER);

                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc  *desc;
                        u32             len, dlen, mem;

                        mem = sg_dma_address(sg);
                        len = sg_dma_len(sg);

                        mem_width = min_t(unsigned int,
                                          data_width, dwc_fast_fls(mem | len));

slave_sg_fromdev_fill_desc:
                        desc = dwc_desc_get(dwc);
                        if (!desc) {
                                dev_err(chan2dev(chan),
                                        "not enough descriptors available\n");
                                goto err_desc_get;
                        }

                        desc->lli.sar = reg;
                        desc->lli.dar = mem;
                        desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
                        if ((len >> reg_width) > dwc->block_size) {
                                dlen = dwc->block_size << reg_width;
                                mem += dlen;
                                len -= dlen;
                        } else {
                                dlen = len;
                                len = 0;
                        }
                        desc->lli.ctlhi = dlen >> reg_width;

                        if (!first) {
                                first = desc;
                        } else {
                                prev->lli.llp = desc->txd.phys;
                                list_add_tail(&desc->desc_node,
                                                &first->tx_list);
                        }
                        prev = desc;
                        total_len += dlen;

                        if (len)
                                goto slave_sg_fromdev_fill_desc;
                }
                break;
        default:
                return NULL;
        }

        if (flags & DMA_PREP_INTERRUPT)
                /* Trigger interrupt after last block */
                prev->lli.ctllo |= DWC_CTLL_INT_EN;

        prev->lli.llp = 0;
        first->len = total_len;

        return &first->txd;

err_desc_get:
        dwc_desc_put(dwc, first);
        return NULL;
}
/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding least significant bit set: n & (n - 1)
 */
static inline void convert_burst(u32 *maxburst)
{
        if (*maxburst > 1)
                *maxburst = fls(*maxburst) - 2;
        else
                *maxburst = 0;
}
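/*
 * Worked example: maxburst = 8 gives fls(8) - 2 = 2 and maxburst = 16
 * gives fls(16) - 2 = 3, matching the 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3
 * mapping above; maxburst <= 1 takes the else branch and maps to 0.
 */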
static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

        /* Check if chan will be configured for slave transfers */
        if (!is_slave_direction(sconfig->direction))
                return -EINVAL;

        memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
        dwc->direction = sconfig->direction;

        convert_burst(&dwc->dma_sconfig.src_maxburst);
        convert_burst(&dwc->dma_sconfig.dst_maxburst);

        return 0;
}
static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
{
        u32 cfglo = channel_readl(dwc, CFG_LO);

        channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
        while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
                cpu_relax();

        dwc->paused = true;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
        u32 cfglo = channel_readl(dwc, CFG_LO);

        channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

        dwc->paused = false;
}
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                       unsigned long arg)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(chan->device);
        struct dw_desc          *desc, *_desc;
        unsigned long           flags;
        LIST_HEAD(list);

        if (cmd == DMA_PAUSE) {
                spin_lock_irqsave(&dwc->lock, flags);

                dwc_chan_pause(dwc);

                spin_unlock_irqrestore(&dwc->lock, flags);
        } else if (cmd == DMA_RESUME) {
                if (!dwc->paused)
                        return 0;

                spin_lock_irqsave(&dwc->lock, flags);

                dwc_chan_resume(dwc);

                spin_unlock_irqrestore(&dwc->lock, flags);
        } else if (cmd == DMA_TERMINATE_ALL) {
                spin_lock_irqsave(&dwc->lock, flags);

                clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

                dwc_chan_disable(dw, dwc);

                dwc_chan_resume(dwc);

                /* active_list entries will end up before queued entries */
                list_splice_init(&dwc->queue, &list);
                list_splice_init(&dwc->active_list, &list);

                spin_unlock_irqrestore(&dwc->lock, flags);

                /* Flush all pending and queued descriptors */
                list_for_each_entry_safe(desc, _desc, &list, desc_node)
                        dwc_descriptor_complete(dwc, desc, false);
        } else if (cmd == DMA_SLAVE_CONFIG) {
                return set_runtime_config(chan, (struct dma_slave_config *)arg);
        } else {
                return -ENXIO;
        }

        return 0;
}
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
              dma_cookie_t cookie,
              struct dma_tx_state *txstate)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        enum dma_status         ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret != DMA_SUCCESS) {
                dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

                ret = dma_cookie_status(chan, cookie, txstate);
        }

        if (ret != DMA_SUCCESS)
                dma_set_residue(txstate, dwc_first_active(dwc)->len);

        if (dwc->paused)
                return DMA_PAUSED;

        return ret;
}
static void dwc_issue_pending(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);

        if (!list_empty(&dwc->queue))
                dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(chan->device);
        struct dw_desc          *desc;
        int                     i;
        unsigned long           flags;

        dev_vdbg(chan2dev(chan), "%s\n", __func__);

        /* ASSERT: channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
                return -EIO;
        }

        dma_cookie_init(chan);

        /*
         * NOTE: some controllers may have additional features that we
         * need to initialize here, like "scatter-gather" (which
         * doesn't mean what you think it means), and status writeback.
         */

        spin_lock_irqsave(&dwc->lock, flags);
        i = dwc->descs_allocated;
        while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
                dma_addr_t phys;

                spin_unlock_irqrestore(&dwc->lock, flags);

                desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
                if (!desc)
                        goto err_desc_alloc;

                memset(desc, 0, sizeof(struct dw_desc));

                INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.tx_submit = dwc_tx_submit;
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.phys = phys;

                dwc_desc_put(dwc, desc);

                spin_lock_irqsave(&dwc->lock, flags);
                i = ++dwc->descs_allocated;
        }

        spin_unlock_irqrestore(&dwc->lock, flags);

        dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

        return i;

err_desc_alloc:
        dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);

        return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(chan->device);
        struct dw_desc          *desc, *_desc;
        unsigned long           flags;
        LIST_HEAD(list);

        dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
                        dwc->descs_allocated);

        /* ASSERT: channel is idle */
        BUG_ON(!list_empty(&dwc->active_list));
        BUG_ON(!list_empty(&dwc->queue));
        BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

        spin_lock_irqsave(&dwc->lock, flags);
        list_splice_init(&dwc->free_list, &list);
        dwc->descs_allocated = 0;
        dwc->initialized = false;

        /* Disable interrupts */
        channel_clear_bit(dw, MASK.XFER, dwc->mask);
        channel_clear_bit(dw, MASK.ERROR, dwc->mask);

        spin_unlock_irqrestore(&dwc->lock, flags);

        list_for_each_entry_safe(desc, _desc, &list, desc_node) {
                dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
                dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
        }

        dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
{
        struct dw_dma *dw = to_dw_dma(chan->device);
        static struct dw_dma *last_dw;
        static char *last_bus_id;
        int i = -1;

        /*
         * dmaengine framework calls this routine for all channels of all dma
         * controller, until true is returned. If 'param' bus_id is not
         * registered with a dma controller (dw), then there is no need of
         * running below function for all channels of dw.
         *
         * This block of code does this by saving the parameters of last
         * failure. If dw and param are same, i.e. trying on same dw with
         * different channel, return false.
         */
        if ((last_dw == dw) && (last_bus_id == param))
                return false;

        /*
         * Return true:
         * - If dw_dma's platform data is not filled with slave info, then all
         *   dma controllers are fine for transfer.
         * - Or if param is NULL
         */
        if (!dw->sd || !param)
                return true;

        while (++i < dw->sd_count) {
                if (!strcmp(dw->sd[i].bus_id, param)) {
                        chan->private = &dw->sd[i];
                        last_dw = NULL;
                        last_bus_id = NULL;

                        return true;
                }
        }

        last_dw = dw;
        last_bus_id = param;
        return false;
}
EXPORT_SYMBOL(dw_dma_generic_filter);
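/*
 * Usage sketch (illustrative only; "uart-tx" is a hypothetical bus_id that
 * would have to match the slave_info registered with this controller):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_generic_filter, "uart-tx");
 */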
/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
        unsigned long           flags;

        if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
                dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
                return -ENODEV;
        }

        spin_lock_irqsave(&dwc->lock, flags);

        /* assert channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
                dev_err(chan2dev(&dwc->chan),
                        "BUG: Attempted to start non-idle channel\n");
                dwc_dump_chan_regs(dwc);
                spin_unlock_irqrestore(&dwc->lock, flags);
                return -EBUSY;
        }

        dma_writel(dw, CLEAR.ERROR, dwc->mask);
        dma_writel(dw, CLEAR.XFER, dwc->mask);

        /* setup DMAC channel registers */
        channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
        channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
        channel_writel(dwc, CTL_HI, 0);

        channel_set_bit(dw, CH_EN, dwc->mask);

        spin_unlock_irqrestore(&dwc->lock, flags);

        return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);
/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
        unsigned long           flags;

        spin_lock_irqsave(&dwc->lock, flags);

        dwc_chan_disable(dw, dwc);

        spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                dma_addr_t buf_addr, size_t buf_len, size_t period_len,
                enum dma_transfer_direction direction)
{
        struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
        struct dma_slave_config         *sconfig = &dwc->dma_sconfig;
        struct dw_cyclic_desc           *cdesc;
        struct dw_cyclic_desc           *retval = NULL;
        struct dw_desc                  *desc;
        struct dw_desc                  *last = NULL;
        unsigned long                   was_cyclic;
        unsigned int                    reg_width;
        unsigned int                    periods;
        unsigned int                    i;
        unsigned long                   flags;

        spin_lock_irqsave(&dwc->lock, flags);
        if (dwc->nollp) {
                spin_unlock_irqrestore(&dwc->lock, flags);
                dev_dbg(chan2dev(&dwc->chan),
                                "channel doesn't support LLP transfers\n");
                return ERR_PTR(-EINVAL);
        }

        if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
                spin_unlock_irqrestore(&dwc->lock, flags);
                dev_dbg(chan2dev(&dwc->chan),
                                "queue and/or active list are not empty\n");
                return ERR_PTR(-EBUSY);
        }

        was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
        spin_unlock_irqrestore(&dwc->lock, flags);
        if (was_cyclic) {
                dev_dbg(chan2dev(&dwc->chan),
                                "channel already prepared for cyclic DMA\n");
                return ERR_PTR(-EBUSY);
        }

        retval = ERR_PTR(-EINVAL);

        if (unlikely(!is_slave_direction(direction)))
                goto out_err;

        dwc->direction = direction;

        if (direction == DMA_MEM_TO_DEV)
                reg_width = __ffs(sconfig->dst_addr_width);
        else
                reg_width = __ffs(sconfig->src_addr_width);

        periods = buf_len / period_len;

        /* Check for too big/unaligned periods and unaligned DMA buffer. */
        if (period_len > (dwc->block_size << reg_width))
                goto out_err;
        if (unlikely(period_len & ((1 << reg_width) - 1)))
                goto out_err;
        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
                goto out_err;

        retval = ERR_PTR(-ENOMEM);

        if (periods > NR_DESCS_PER_CHANNEL)
                goto out_err;

        cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
        if (!cdesc)
                goto out_err;

        cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
        if (!cdesc->desc)
                goto out_err_alloc;

        for (i = 0; i < periods; i++) {
                desc = dwc_desc_get(dwc);
                if (!desc)
                        goto out_err_desc_get;

                switch (direction) {
                case DMA_MEM_TO_DEV:
                        desc->lli.dar = sconfig->dst_addr;
                        desc->lli.sar = buf_addr + (period_len * i);
                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
                                        | DWC_CTLL_DST_WIDTH(reg_width)
                                        | DWC_CTLL_SRC_WIDTH(reg_width)
                                        | DWC_CTLL_DST_FIX
                                        | DWC_CTLL_SRC_INC
                                        | DWC_CTLL_INT_EN);

                        desc->lli.ctllo |= sconfig->device_fc ?
                                DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
                                DWC_CTLL_FC(DW_DMA_FC_D_M2P);

                        break;
                case DMA_DEV_TO_MEM:
                        desc->lli.dar = buf_addr + (period_len * i);
                        desc->lli.sar = sconfig->src_addr;
                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
                                        | DWC_CTLL_SRC_WIDTH(reg_width)
                                        | DWC_CTLL_DST_WIDTH(reg_width)
                                        | DWC_CTLL_DST_INC
                                        | DWC_CTLL_SRC_FIX
                                        | DWC_CTLL_INT_EN);

                        desc->lli.ctllo |= sconfig->device_fc ?
                                DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
                                DWC_CTLL_FC(DW_DMA_FC_D_P2M);

                        break;
                default:
                        break;
                }

                desc->lli.ctlhi = (period_len >> reg_width);
                cdesc->desc[i] = desc;

                if (last)
                        last->lli.llp = desc->txd.phys;

                last = desc;
        }

        /* lets make a cyclic list */
        last->lli.llp = cdesc->desc[0]->txd.phys;

        dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
                        "period %zu periods %d\n", (unsigned long long)buf_addr,
                        buf_len, period_len, periods);

        cdesc->periods = periods;
        dwc->cdesc = cdesc;

        return cdesc;

out_err_desc_get:
        while (i--)
                dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
        kfree(cdesc);
out_err:
        clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
        return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
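/*
 * Usage sketch (illustrative): a client driving a hypothetical 4 KiB ring
 * buffer split into four 1 KiB periods would do roughly:
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, 4096, 1024, DMA_DEV_TO_MEM);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = my_period_done;	(hypothetical callback)
 *	cdesc->period_callback_param = my_ctx;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */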
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
        struct dw_cyclic_desc   *cdesc = dwc->cdesc;
        int                     i;
        unsigned long           flags;

        dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

        if (!cdesc)
                return;

        spin_lock_irqsave(&dwc->lock, flags);

        dwc_chan_disable(dw, dwc);

        dma_writel(dw, CLEAR.ERROR, dwc->mask);
        dma_writel(dw, CLEAR.XFER, dwc->mask);

        spin_unlock_irqrestore(&dwc->lock, flags);

        for (i = 0; i < cdesc->periods; i++)
                dwc_desc_put(dwc, cdesc->desc[i]);

        kfree(cdesc->desc);
        kfree(cdesc);

        clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
        int i;

        dma_writel(dw, CFG, 0);

        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

        while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
                cpu_relax();

        for (i = 0; i < dw->dma.chancnt; i++)
                dw->chan[i].initialized = false;
}
#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
        struct device_node *sn, *cn, *np = pdev->dev.of_node;
        struct dw_dma_platform_data *pdata;
        struct dw_dma_slave *sd;
        u32 tmp, arr[4];

        if (!np) {
                dev_err(&pdev->dev, "Missing DT data\n");
                return NULL;
        }

        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return NULL;

        if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels))
                return NULL;

        if (of_property_read_bool(np, "is_private"))
                pdata->is_private = true;

        if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
                pdata->chan_allocation_order = (unsigned char)tmp;

        if (!of_property_read_u32(np, "chan_priority", &tmp))
                pdata->chan_priority = tmp;

        if (!of_property_read_u32(np, "block_size", &tmp))
                pdata->block_size = tmp;

        if (!of_property_read_u32(np, "nr_masters", &tmp)) {
                if (tmp > 4)
                        return NULL;

                pdata->nr_masters = tmp;
        }

        if (!of_property_read_u32_array(np, "data_width", arr,
                                pdata->nr_masters))
                for (tmp = 0; tmp < pdata->nr_masters; tmp++)
                        pdata->data_width[tmp] = arr[tmp];

        /* parse slave data */
        sn = of_find_node_by_name(np, "slave_info");
        if (!sn)
                return pdata;

        /* calculate number of slaves */
        tmp = of_get_child_count(sn);
        if (!tmp)
                return NULL;

        sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL);
        if (!sd)
                return NULL;

        pdata->sd = sd;
        pdata->sd_count = tmp;

        for_each_child_of_node(sn, cn) {
                sd->dma_dev = &pdev->dev;
                of_property_read_string(cn, "bus_id", &sd->bus_id);
                of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi);
                of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo);
                if (!of_property_read_u32(cn, "src_master", &tmp))
                        sd->src_master = tmp;

                if (!of_property_read_u32(cn, "dst_master", &tmp))
                        sd->dst_master = tmp;
                sd++;
        }

        return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
        return NULL;
}
#endif
static int dw_probe(struct platform_device *pdev)
{
        struct dw_dma_platform_data *pdata;
        struct resource         *io;
        struct dw_dma           *dw;
        size_t                  size;
        void __iomem            *regs;
        bool                    autocfg;
        unsigned int            dw_params;
        unsigned int            nr_channels;
        unsigned int            max_blk_size = 0;
        int                     irq;
        int                     err;
        int                     i;

        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!io)
                return -EINVAL;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        regs = devm_request_and_ioremap(&pdev->dev, io);
        if (!regs)
                return -EBUSY;

        dw_params = dma_read_byaddr(regs, DW_PARAMS);
        autocfg = dw_params >> DW_PARAMS_EN & 0x1;

        pdata = dev_get_platdata(&pdev->dev);
        if (!pdata)
                pdata = dw_dma_parse_dt(pdev);

        if (!pdata && autocfg) {
                pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
                if (!pdata)
                        return -ENOMEM;

                /* Fill platform data with the default values */
                pdata->is_private = true;
                pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
                pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
        } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
                return -EINVAL;

        if (autocfg)
                nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
        else
                nr_channels = pdata->nr_channels;

        size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
        dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!dw)
                return -ENOMEM;

        dw->clk = devm_clk_get(&pdev->dev, "hclk");
        if (IS_ERR(dw->clk))
                return PTR_ERR(dw->clk);
        clk_prepare_enable(dw->clk);

        dw->regs = regs;
        dw->sd = pdata->sd;
        dw->sd_count = pdata->sd_count;

        /* get hardware configuration parameters */
        if (autocfg) {
                max_blk_size = dma_readl(dw, MAX_BLK_SIZE);

                dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
                for (i = 0; i < dw->nr_masters; i++) {
                        dw->data_width[i] =
                                (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
                }
        } else {
                dw->nr_masters = pdata->nr_masters;
                memcpy(dw->data_width, pdata->data_width, 4);
        }

        /* Calculate all channel mask before DMA setup */
        dw->all_chan_mask = (1 << nr_channels) - 1;

        /* force dma off, just in case */
        dw_dma_off(dw);

        /* disable BLOCK interrupts as well */
        channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

        err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
                               "dw_dmac", dw);
        if (err)
                return err;

        platform_set_drvdata(pdev, dw);

        /* create a pool of consistent memory blocks for hardware descriptors */
        dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
                                         sizeof(struct dw_desc), 4, 0);
        if (!dw->desc_pool) {
                dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
                return -ENOMEM;
        }

        tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

        INIT_LIST_HEAD(&dw->dma.channels);
        for (i = 0; i < nr_channels; i++) {
                struct dw_dma_chan      *dwc = &dw->chan[i];
                int                     r = nr_channels - i - 1;

                dwc->chan.device = &dw->dma;
                dma_cookie_init(&dwc->chan);
                if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
                        list_add_tail(&dwc->chan.device_node,
                                        &dw->dma.channels);
                else
                        list_add(&dwc->chan.device_node, &dw->dma.channels);

                /* 7 is highest priority & 0 is lowest. */
                if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
                        dwc->priority = r;
                else
                        dwc->priority = i;

                dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
                spin_lock_init(&dwc->lock);
                dwc->mask = 1 << i;

                INIT_LIST_HEAD(&dwc->active_list);
                INIT_LIST_HEAD(&dwc->queue);
                INIT_LIST_HEAD(&dwc->free_list);

                channel_clear_bit(dw, CH_EN, dwc->mask);

                dwc->direction = DMA_TRANS_NONE;

                /* hardware configuration */
                if (autocfg) {
                        unsigned int dwc_params;

                        dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
                                                     DWC_PARAMS);

                        /* Decode maximum block size for given channel. The
                         * stored 4 bit value represents blocks from 0x00 for 3
                         * up to 0x0a for 4095. */
                        dwc->block_size =
                                (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
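                        /*
                         * Worked example: an encoded nibble of 0x0a gives
                         * (4 << 0x0a) - 1 = 4095 and 0x00 gives
                         * (4 << 0) - 1 = 3, as the comment above describes.
                         */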
                        dwc->nollp =
                                (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
                } else {
                        dwc->block_size = pdata->block_size;

                        /* Check if channel supports multi block transfer */
                        channel_writel(dwc, LLP, 0xfffffffc);
                        dwc->nollp =
                                (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
                        channel_writel(dwc, LLP, 0);
                }
        }

        /* Clear all interrupts on all channels. */
        dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
        dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
        dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
        dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
        dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

        dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
        dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
        if (pdata->is_private)
                dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
        dw->dma.dev = &pdev->dev;
        dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
        dw->dma.device_free_chan_resources = dwc_free_chan_resources;

        dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

        dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
        dw->dma.device_control = dwc_control;

        dw->dma.device_tx_status = dwc_tx_status;
        dw->dma.device_issue_pending = dwc_issue_pending;

        dma_writel(dw, CFG, DW_CFG_DMA_EN);

        dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
                 nr_channels);

        dma_async_device_register(&dw->dma);

        return 0;
}
static int __devexit dw_remove(struct platform_device *pdev)
{
        struct dw_dma           *dw = platform_get_drvdata(pdev);
        struct dw_dma_chan      *dwc, *_dwc;

        dw_dma_off(dw);
        dma_async_device_unregister(&dw->dma);

        tasklet_kill(&dw->tasklet);

        list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
                        chan.device_node) {
                list_del(&dwc->chan.device_node);
                channel_clear_bit(dw, CH_EN, dwc->mask);
        }

        return 0;
}
static void dw_shutdown(struct platform_device *pdev)
{
        struct dw_dma   *dw = platform_get_drvdata(pdev);

        dw_dma_off(dw);
        clk_disable_unprepare(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct dw_dma   *dw = platform_get_drvdata(pdev);

        dw_dma_off(dw);
        clk_disable_unprepare(dw->clk);

        return 0;
}

static int dw_resume_noirq(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct dw_dma   *dw = platform_get_drvdata(pdev);

        clk_prepare_enable(dw->clk);
        dma_writel(dw, CFG, DW_CFG_DMA_EN);

        return 0;
}
static const struct dev_pm_ops dw_dev_pm_ops = {
        .suspend_noirq = dw_suspend_noirq,
        .resume_noirq = dw_resume_noirq,
        .freeze_noirq = dw_suspend_noirq,
        .thaw_noirq = dw_resume_noirq,
        .restore_noirq = dw_resume_noirq,
        .poweroff_noirq = dw_suspend_noirq,
};
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_id_table[] = {
        { .compatible = "snps,dma-spear1340" },
        {}
};
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif
static struct platform_driver dw_driver = {
        .probe          = dw_probe,
        .remove         = dw_remove,
        .shutdown       = dw_shutdown,
        .driver = {
                .name   = "dw_dmac",
                .pm     = &dw_dev_pm_ops,
                .of_match_table = of_match_ptr(dw_dma_id_table),
        },
};
static int __init dw_init(void)
{
        return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
        platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");