/*
 * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd
 * Author: Sugar <shuge@allwinnertech.com>
 *
 * Copyright (C) 2014 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "virt-dma.h"
/*
 * There are 16 physical channels that can work in parallel.
 *
 * However we have 30 different endpoints for our requests.
 *
 * Since the channels are only able to handle one unidirectional
 * transfer at a time, we need to allocate more virtual channels so
 * that everyone can grab one.
 *
 * Some devices can't work in both directions (mostly because it
 * wouldn't make sense), so we have a bit fewer virtual channels than
 * two channels per endpoint.
 */
#define NR_MAX_CHANNELS		16
#define NR_MAX_REQUESTS		30
#define NR_MAX_VCHANS		53
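/*
 * Editor's note: NR_MAX_VCHANS is the "a bit fewer than two virtual
 * channels per endpoint" bound described above, i.e. an upper bound of
 * 2 * NR_MAX_REQUESTS = 60 minus the endpoints that only transfer in
 * one direction.
 */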
/*
 * Common registers
 */
#define DMA_IRQ_EN(x)		((x) * 0x04)
#define DMA_IRQ_HALF			BIT(0)
#define DMA_IRQ_PKG			BIT(1)
#define DMA_IRQ_QUEUE			BIT(2)

#define DMA_IRQ_CHAN_NR			8
#define DMA_IRQ_CHAN_WIDTH		4
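/*
 * Editor's illustration (not part of the original file): the interrupt
 * enable and status bits are packed as one 4-bit field per channel,
 * 8 channels per 32-bit register.  Channel 10, for instance, lives in
 * register 1 (10 / DMA_IRQ_CHAN_NR) at field 2 (10 % DMA_IRQ_CHAN_NR),
 * so its QUEUE enable bit is DMA_IRQ_QUEUE << (2 * DMA_IRQ_CHAN_WIDTH),
 * i.e. bit 10 of DMA_IRQ_EN(1).
 */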
#define DMA_IRQ_STAT(x)		((x) * 0x04 + 0x10)

#define DMA_STAT		0x30
/*
 * Channels specific registers
 */
#define DMA_CHAN_ENABLE		0x00
#define DMA_CHAN_ENABLE_START		BIT(0)
#define DMA_CHAN_ENABLE_STOP		0

#define DMA_CHAN_PAUSE		0x04
#define DMA_CHAN_PAUSE_PAUSE		BIT(1)
#define DMA_CHAN_PAUSE_RESUME		0

#define DMA_CHAN_LLI_ADDR	0x08

#define DMA_CHAN_CUR_CFG	0x0c
#define DMA_CHAN_CFG_SRC_DRQ(x)		((x) & 0x1f)
#define DMA_CHAN_CFG_SRC_IO_MODE	BIT(5)
#define DMA_CHAN_CFG_SRC_LINEAR_MODE	(0 << 5)
#define DMA_CHAN_CFG_SRC_BURST(x)	(((x) & 0x3) << 7)
#define DMA_CHAN_CFG_SRC_WIDTH(x)	(((x) & 0x3) << 9)

#define DMA_CHAN_CFG_DST_DRQ(x)		(DMA_CHAN_CFG_SRC_DRQ(x) << 16)
#define DMA_CHAN_CFG_DST_IO_MODE	(DMA_CHAN_CFG_SRC_IO_MODE << 16)
#define DMA_CHAN_CFG_DST_LINEAR_MODE	(DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
#define DMA_CHAN_CFG_DST_BURST(x)	(DMA_CHAN_CFG_SRC_BURST(x) << 16)
#define DMA_CHAN_CFG_DST_WIDTH(x)	(DMA_CHAN_CFG_SRC_WIDTH(x) << 16)
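/*
 * Editor's note: the destination fields occupy bits 16-31 of the
 * channel configuration register with exactly the same layout as the
 * source fields in bits 0-15, hence the "<< 16" mirroring above.
 */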
#define DMA_CHAN_CUR_SRC	0x10

#define DMA_CHAN_CUR_DST	0x14

#define DMA_CHAN_CUR_CNT	0x18

#define DMA_CHAN_CUR_PARA	0x1c
/*
 * Various hardware related defines
 */
#define LLI_LAST_ITEM	0xfffff800
#define NORMAL_WAIT	8
#define DRQ_SDRAM	1
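/*
 * Editor's note: LLI_LAST_ITEM is the sentinel written to an LLI's
 * p_lli_next field to tell the controller it has reached the end of
 * the chain, and NORMAL_WAIT is the default number of wait cycles
 * programmed into each LLI's para field (see sun6i_dma_lli_add() and
 * sun6i_dma_cfg_lli() below).
 */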
/*
 * Hardware representation of the LLI
 *
 * The hardware will be fed the physical address of this structure,
 * and read its content in order to start the transfer.
 */
struct sun6i_dma_lli {
	u32			cfg;
	u32			src;
	u32			dst;
	u32			len;
	u32			para;
	u32			p_lli_next;

	/*
	 * This field is not used by the DMA controller, but will be
	 * used by the CPU to go through the list (mostly for dumping
	 * or freeing it).
	 */
	struct sun6i_dma_lli	*v_lli_next;
};
struct sun6i_desc {
	struct virt_dma_desc	vd;
	dma_addr_t		p_lli;
	struct sun6i_dma_lli	*v_lli;
};
struct sun6i_pchan {
	u32			idx;
	void __iomem		*base;
	struct sun6i_vchan	*vchan;
	struct sun6i_desc	*desc;
	struct sun6i_desc	*done;
};
struct sun6i_vchan {
	struct virt_dma_chan	vc;
	struct list_head	node;
	struct dma_slave_config	cfg;
	struct sun6i_pchan	*phy;
	u8			port;
};
struct sun6i_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct clk		*clk;
	int			irq;
	spinlock_t		lock;
	struct reset_control	*rstc;
	struct tasklet_struct	task;
	atomic_t		tasklet_shutdown;
	struct list_head	pending;
	struct dma_pool		*pool;
	struct sun6i_pchan	*pchans;
	struct sun6i_vchan	*vchans;
};
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d)
{
	return container_of(d, struct sun6i_dma_dev, slave);
}
static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun6i_vchan, vc.chan);
}
static inline struct sun6i_desc *
to_sun6i_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct sun6i_desc, vd.tx);
}
static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
{
	dev_dbg(sdev->slave.dev, "Common register:\n"
		"\tmask0(%04x): 0x%08x\n"
		"\tmask1(%04x): 0x%08x\n"
		"\tpend0(%04x): 0x%08x\n"
		"\tpend1(%04x): 0x%08x\n"
		"\tstats(%04x): 0x%08x\n",
		DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)),
		DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)),
		DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)),
		DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)),
		DMA_STAT, readl(sdev->base + DMA_STAT));
}
static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
					    struct sun6i_pchan *pchan)
{
	phys_addr_t reg = virt_to_phys(pchan->base);

	dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
		"\t___en(%04x): \t0x%08x\n"
		"\tpause(%04x): \t0x%08x\n"
		"\tstart(%04x): \t0x%08x\n"
		"\t__cfg(%04x): \t0x%08x\n"
		"\t__src(%04x): \t0x%08x\n"
		"\t__dst(%04x): \t0x%08x\n"
		"\tcount(%04x): \t0x%08x\n"
		"\t_para(%04x): \t0x%08x\n\n",
		pchan->idx, &reg,
		DMA_CHAN_ENABLE,
		readl(pchan->base + DMA_CHAN_ENABLE),
		DMA_CHAN_PAUSE,
		readl(pchan->base + DMA_CHAN_PAUSE),
		DMA_CHAN_LLI_ADDR,
		readl(pchan->base + DMA_CHAN_LLI_ADDR),
		DMA_CHAN_CUR_CFG,
		readl(pchan->base + DMA_CHAN_CUR_CFG),
		DMA_CHAN_CUR_SRC,
		readl(pchan->base + DMA_CHAN_CUR_SRC),
		DMA_CHAN_CUR_DST,
		readl(pchan->base + DMA_CHAN_CUR_DST),
		DMA_CHAN_CUR_CNT,
		readl(pchan->base + DMA_CHAN_CUR_CNT),
		DMA_CHAN_CUR_PARA,
		readl(pchan->base + DMA_CHAN_CUR_PARA));
}
static inline s8 convert_burst(u32 maxburst)
{
	switch (maxburst) {
	case 1:
		return 0;
	case 8:
		return 2;
	default:
		return -EINVAL;
	}
}
static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width)
{
	if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) ||
	    (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	return addr_width >> 1;
}
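/*
 * Editor's note: DMA_SLAVE_BUSWIDTH_1_BYTE/2_BYTES/4_BYTES are 1, 2
 * and 4, so the ">> 1" above maps them to the hardware width codes 0,
 * 1 and 2 expected by DMA_CHAN_CFG_SRC_WIDTH()/DST_WIDTH().
 */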
static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
			       struct sun6i_dma_lli *next,
			       dma_addr_t next_phy,
			       struct sun6i_desc *txd)
{
	if ((!prev && !txd) || !next)
		return NULL;

	if (!prev) {
		txd->p_lli = next_phy;
		txd->v_lli = next;
	} else {
		prev->p_lli_next = next_phy;
		prev->v_lli_next = next;
	}

	next->p_lli_next = LLI_LAST_ITEM;
	next->v_lli_next = NULL;

	return next;
}
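/*
 * Editor's sketch of how the helper is used (mirrors
 * sun6i_dma_prep_slave_sg() below): the first call passes prev == NULL
 * so the node becomes the head of the descriptor, and each later call
 * links one more node onto the chain:
 *
 *	prev = sun6i_dma_lli_add(NULL, first, first_phy, txd);
 *	prev = sun6i_dma_lli_add(prev, second, second_phy, txd);
 */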
static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli,
				    dma_addr_t src,
				    dma_addr_t dst, u32 len,
				    struct dma_slave_config *config)
{
	s8 src_width, dst_width, src_burst, dst_burst;

	if (!config)
		return -EINVAL;

	src_burst = convert_burst(config->src_maxburst);
	if (src_burst < 0)
		return src_burst;

	dst_burst = convert_burst(config->dst_maxburst);
	if (dst_burst < 0)
		return dst_burst;

	src_width = convert_buswidth(config->src_addr_width);
	if (src_width < 0)
		return src_width;

	dst_width = convert_buswidth(config->dst_addr_width);
	if (dst_width < 0)
		return dst_width;

	lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
		DMA_CHAN_CFG_SRC_WIDTH(src_width) |
		DMA_CHAN_CFG_DST_BURST(dst_burst) |
		DMA_CHAN_CFG_DST_WIDTH(dst_width);

	lli->src = src;
	lli->dst = dst;
	lli->len = len;
	lli->para = NORMAL_WAIT;

	return 0;
}
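/*
 * Editor's worked example: with an 8-unit burst (code 2) and a 4-byte
 * bus width (code 2) on both sides, lli->cfg above becomes
 * (2 << 7) | (2 << 9) | (2 << 23) | (2 << 25): the source burst and
 * width fields in bits 7-10, and the same pattern shifted into the
 * destination half of the register.
 */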
static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
				      struct sun6i_dma_lli *lli)
{
	phys_addr_t p_lli = virt_to_phys(lli);

	dev_dbg(chan2dev(&vchan->vc.chan),
		"\n\tdesc: p - %pa v - 0x%p\n"
		"\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
		"\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
		&p_lli, lli,
		lli->cfg, lli->src, lli->dst,
		lli->len, lli->para, lli->p_lli_next);
}
static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sun6i_desc *txd = to_sun6i_desc(&vd->tx);
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device);
	struct sun6i_dma_lli *v_lli, *v_next;
	dma_addr_t p_lli, p_next;

	if (unlikely(!txd))
		return;

	p_lli = txd->p_lli;
	v_lli = txd->v_lli;

	while (v_lli) {
		v_next = v_lli->v_lli_next;
		p_next = v_lli->p_lli_next;

		dma_pool_free(sdev->pool, v_lli, p_lli);

		v_lli = v_next;
		p_lli = p_next;
	}

	kfree(txd);
}
static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock(&sdev->lock);
	list_del_init(&vchan->node);
	spin_unlock(&sdev->lock);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vchan_get_all_descriptors(&vchan->vc, &head);

	if (pchan) {
		writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
		writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);

		vchan->phy = NULL;
		pchan->vchan = NULL;
		pchan->desc = NULL;
		pchan->done = NULL;
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}
static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
	struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc);
	struct sun6i_pchan *pchan = vchan->phy;
	u32 irq_val, irq_reg, irq_offset;

	if (!pchan)
		return -EAGAIN;

	if (!desc) {
		pchan->desc = NULL;
		pchan->done = NULL;
		return -EAGAIN;
	}

	list_del(&desc->node);

	pchan->desc = to_sun6i_desc(&desc->tx);
	pchan->done = NULL;

	sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);

	irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
	irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;

	irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg));
	irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH);
	writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg));

	writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
	writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);

	sun6i_dma_dump_com_regs(sdev);
	sun6i_dma_dump_chan_regs(sdev, pchan);

	return 0;
}
static void sun6i_dma_tasklet(unsigned long data)
{
	struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	unsigned int pchan_alloc = 0;
	unsigned int pchan_idx;

	list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&vchan->vc.lock);

		pchan = vchan->phy;

		if (pchan && pchan->done) {
			if (sun6i_dma_start_desc(vchan)) {
				/*
				 * No current txd associated with this channel
				 */
				dev_dbg(sdev->slave.dev, "pchan %u: free\n",
					pchan->idx);

				/* Mark this channel free */
				vchan->phy = NULL;
				pchan->vchan = NULL;
			}
		}
		spin_unlock_irq(&vchan->vc.lock);
	}

	spin_lock_irq(&sdev->lock);
	for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
		pchan = &sdev->pchans[pchan_idx];

		if (pchan->vchan || list_empty(&sdev->pending))
			continue;

		vchan = list_first_entry(&sdev->pending,
					 struct sun6i_vchan, node);

		/* Remove from pending channels */
		list_del_init(&vchan->node);
		pchan_alloc |= BIT(pchan_idx);

		/* Mark this channel allocated */
		pchan->vchan = vchan;
		vchan->phy = pchan;
		dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n",
			pchan->idx, &vchan->vc);
	}
	spin_unlock_irq(&sdev->lock);

	for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
		if (!(pchan_alloc & BIT(pchan_idx)))
			continue;

		pchan = sdev->pchans + pchan_idx;
		vchan = pchan->vchan;
		if (vchan) {
			spin_lock_irq(&vchan->vc.lock);
			sun6i_dma_start_desc(vchan);
			spin_unlock_irq(&vchan->vc.lock);
		}
	}
}
static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
{
	struct sun6i_dma_dev *sdev = dev_id;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	int i, j, ret = IRQ_NONE;
	u32 status;

	for (i = 0; i < 2; i++) {
		status = readl(sdev->base + DMA_IRQ_STAT(i));
		if (!status)
			continue;

		dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n",
			i ? "high" : "low", status);

		writel(status, sdev->base + DMA_IRQ_STAT(i));

		for (j = 0; (j < 8) && status; j++) {
			if (status & DMA_IRQ_QUEUE) {
				pchan = sdev->pchans + j;
				vchan = pchan->vchan;

				if (vchan) {
					spin_lock(&vchan->vc.lock);
					vchan_cookie_complete(&pchan->desc->vd);
					pchan->done = pchan->desc;
					spin_unlock(&vchan->vc.lock);
				}
			}

			status = status >> 4;
		}

		if (!atomic_read(&sdev->tasklet_shutdown))
			tasklet_schedule(&sdev->task);

		ret = IRQ_HANDLED;
	}

	return ret;
}
static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_dma_lli *v_lli;
	struct sun6i_desc *txd;
	dma_addr_t p_lli;
	s8 burst, width;

	dev_dbg(chan2dev(chan),
		"%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n",
		__func__, vchan->vc.chan.chan_id, &dest, &src, len, flags);

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
	if (!v_lli) {
		dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
		goto err_txd_free;
	}

	v_lli->src = src;
	v_lli->dst = dest;
	v_lli->len = len;
	v_lli->para = NORMAL_WAIT;

	burst = convert_burst(8);
	width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
	/* dma_pool_alloc() memory is not zeroed, so build cfg from scratch */
	v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
		DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
		DMA_CHAN_CFG_DST_LINEAR_MODE |
		DMA_CHAN_CFG_SRC_LINEAR_MODE |
		DMA_CHAN_CFG_SRC_BURST(burst) |
		DMA_CHAN_CFG_SRC_WIDTH(width) |
		DMA_CHAN_CFG_DST_BURST(burst) |
		DMA_CHAN_CFG_DST_WIDTH(width);

	sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);

	sun6i_dma_dump_lli(vchan, v_lli);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	kfree(txd);
	return NULL;
}
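/*
 * Editor's sketch (hypothetical client code, not part of this driver):
 * a dmaengine user reaches the callback above through the generic API,
 * roughly:
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */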
static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction dir,
		unsigned long flags, void *context)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun6i_dma_lli *v_lli, *prev = NULL;
	struct sun6i_desc *txd;
	struct scatterlist *sg;
	dma_addr_t p_lli;
	int i, ret;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
		if (!v_lli)
			goto err_lli_free;

		if (dir == DMA_MEM_TO_DEV) {
			ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg),
						sconfig->dst_addr, sg_dma_len(sg),
						sconfig);
			if (ret)
				goto err_cur_lli_free;

			v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE |
				DMA_CHAN_CFG_SRC_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_DST_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sconfig->dst_addr, &sg_dma_address(sg),
				sg_dma_len(sg), flags);
		} else {
			ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr,
						sg_dma_address(sg), sg_dma_len(sg),
						sconfig);
			if (ret)
				goto err_cur_lli_free;

			v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_IO_MODE |
				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_SRC_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sg_dma_address(sg), &sconfig->src_addr,
				sg_dma_len(sg), flags);
		}

		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
	}

	dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
		sun6i_dma_dump_lli(vchan, prev);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_cur_lli_free:
	dma_pool_free(sdev->pool, v_lli, p_lli);
err_lli_free:
	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
		dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
	kfree(txd);
	return NULL;
}
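/*
 * Editor's sketch (hypothetical client code): a peripheral driver would
 * typically configure the channel and submit a scatterlist along these
 * lines:
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */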
static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;
	int ret = 0;

	switch (cmd) {
	case DMA_RESUME:
		dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

		spin_lock_irqsave(&vchan->vc.lock, flags);

		if (pchan) {
			writel(DMA_CHAN_PAUSE_RESUME,
			       pchan->base + DMA_CHAN_PAUSE);
		} else if (!list_empty(&vchan->vc.desc_issued)) {
			spin_lock(&sdev->lock);
			list_add_tail(&vchan->node, &sdev->pending);
			spin_unlock(&sdev->lock);
		}

		spin_unlock_irqrestore(&vchan->vc.lock, flags);
		break;

	case DMA_PAUSE:
		dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);

		if (pchan) {
			writel(DMA_CHAN_PAUSE_PAUSE,
			       pchan->base + DMA_CHAN_PAUSE);
		} else {
			spin_lock(&sdev->lock);
			list_del_init(&vchan->node);
			spin_unlock(&sdev->lock);
		}
		break;

	case DMA_TERMINATE_ALL:
		ret = sun6i_dma_terminate_all(vchan);
		break;
	case DMA_SLAVE_CONFIG:
		memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config));
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	struct sun6i_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct sun6i_desc *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);
	txd = to_sun6i_desc(&vd->tx);

	if (vd) {
		for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next)
			bytes += lli->len;
	} else if (!pchan || !pchan->desc) {
		bytes = 0;
	} else {
		bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}
static void sun6i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan_issue_pending(&vchan->vc)) {
		spin_lock(&sdev->lock);

		if (!vchan->phy && list_empty(&vchan->node)) {
			list_add_tail(&vchan->node, &sdev->pending);
			tasklet_schedule(&sdev->task);
			dev_dbg(chan2dev(chan), "vchan %p: issued\n",
				&vchan->vc);
		}

		spin_unlock(&sdev->lock);
	} else {
		dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n",
			&vchan->vc);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}
static int sun6i_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}
static void sun6i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&sdev->lock, flags);
	list_del_init(&vchan->node);
	spin_unlock_irqrestore(&sdev->lock, flags);

	vchan_free_chan_resources(&vchan->vc);
}
static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun6i_dma_dev *sdev = ofdma->of_dma_data;
	struct sun6i_vchan *vchan;
	struct dma_chan *chan;
	u8 port = dma_spec->args[0];

	if (port > NR_MAX_REQUESTS)
		return NULL;

	chan = dma_get_any_slave_channel(&sdev->slave);
	if (!chan)
		return NULL;

	vchan = to_sun6i_vchan(chan);
	vchan->port = port;

	return chan;
}
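/*
 * Editor's sketch (hypothetical device-tree consumer, with a made-up
 * node and request number): the single specifier cell decoded above is
 * the peripheral's DRQ/port number:
 *
 *	spi1: spi@1c69000 {
 *		...
 *		dmas = <&dma 24>, <&dma 24>;
 *		dma-names = "rx", "tx";
 *	};
 */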
static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev)
{
	/* Disable all interrupts from DMA */
	writel(0, sdev->base + DMA_IRQ_EN(0));
	writel(0, sdev->base + DMA_IRQ_EN(1));

	/* Prevent spurious interrupts from scheduling the tasklet */
	atomic_inc(&sdev->tasklet_shutdown);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(sdev->slave.dev, sdev->irq, sdev);

	/* Actually prevent the tasklet from being scheduled */
	tasklet_kill(&sdev->task);
}
static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
{
	int i;

	for (i = 0; i < NR_MAX_VCHANS; i++) {
		struct sun6i_vchan *vchan = &sdev->vchans[i];

		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}
static int sun6i_dma_probe(struct platform_device *pdev)
{
	struct sun6i_dma_dev *sdc;
	struct resource *res;
	int ret, i;

	sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
	if (!sdc)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdc->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(sdc->base))
		return PTR_ERR(sdc->base);

	sdc->irq = platform_get_irq(pdev, 0);
	if (sdc->irq < 0) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		return sdc->irq;
	}

	sdc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(sdc->clk);
	}

	sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->rstc)) {
		dev_err(&pdev->dev, "No reset controller specified\n");
		return PTR_ERR(sdc->rstc);
	}

	sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				     sizeof(struct sun6i_dma_lli), 4, 0);
	if (!sdc->pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, sdc);
	INIT_LIST_HEAD(&sdc->pending);
	spin_lock_init(&sdc->lock);

	dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);

	INIT_LIST_HEAD(&sdc->slave.channels);
	sdc->slave.device_alloc_chan_resources	= sun6i_dma_alloc_chan_resources;
	sdc->slave.device_free_chan_resources	= sun6i_dma_free_chan_resources;
	sdc->slave.device_tx_status		= sun6i_dma_tx_status;
	sdc->slave.device_issue_pending		= sun6i_dma_issue_pending;
	sdc->slave.device_prep_slave_sg		= sun6i_dma_prep_slave_sg;
	sdc->slave.device_prep_dma_memcpy	= sun6i_dma_prep_dma_memcpy;
	sdc->slave.device_control		= sun6i_dma_control;
	sdc->slave.chancnt			= NR_MAX_VCHANS;
	sdc->slave.copy_align			= 4;

	sdc->slave.dev = &pdev->dev;

	sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS,
				   sizeof(struct sun6i_pchan), GFP_KERNEL);
	if (!sdc->pchans)
		return -ENOMEM;

	sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS,
				   sizeof(struct sun6i_vchan), GFP_KERNEL);
	if (!sdc->vchans)
		return -ENOMEM;

	tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);

	for (i = 0; i < NR_MAX_CHANNELS; i++) {
		struct sun6i_pchan *pchan = &sdc->pchans[i];

		pchan->idx = i;
		pchan->base = sdc->base + 0x100 + i * 0x40;
	}

	for (i = 0; i < NR_MAX_VCHANS; i++) {
		struct sun6i_vchan *vchan = &sdc->vchans[i];

		INIT_LIST_HEAD(&vchan->node);
		vchan->vc.desc_free = sun6i_dma_free_desc;
		vchan_init(&vchan->vc, &sdc->slave);
	}

	ret = reset_control_deassert(sdc->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't deassert the device from reset\n");
		goto err_chan_free;
	}

	ret = clk_prepare_enable(sdc->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		goto err_reset_assert;
	}

	ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0,
			       dev_name(&pdev->dev), sdc);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&sdc->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_irq_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate,
					 sdc);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&sdc->slave);
err_irq_disable:
	sun6i_kill_tasklet(sdc);
err_clk_disable:
	clk_disable_unprepare(sdc->clk);
err_reset_assert:
	reset_control_assert(sdc->rstc);
err_chan_free:
	sun6i_dma_free(sdc);
	return ret;
}
static int sun6i_dma_remove(struct platform_device *pdev)
{
	struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdc->slave);

	sun6i_kill_tasklet(sdc);

	clk_disable_unprepare(sdc->clk);
	reset_control_assert(sdc->rstc);

	sun6i_dma_free(sdc);

	return 0;
}
static struct of_device_id sun6i_dma_match[] = {
	{ .compatible = "allwinner,sun6i-a31-dma" },
	{ /* sentinel */ }
};
static struct platform_driver sun6i_dma_driver = {
	.probe		= sun6i_dma_probe,
	.remove		= sun6i_dma_remove,
	.driver = {
		.name		= "sun6i-dma",
		.of_match_table	= sun6i_dma_match,
	},
};
module_platform_driver(sun6i_dma_driver);
MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver");
MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_LICENSE("GPL");