/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

#define DCSR_RUN (1 << 31)	/* Run Bit (read / write) */
#define DCSR_NODESC (1 << 30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND (1 << 8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE (1 << 3)	/* Stop State (read-only) */
#define DCSR_ENDINTR (1 << 2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR (1 << 1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR (1 << 0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN (1 << 28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN (1 << 27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
#define DCSR_SETCMPST (1 << 25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST (1 << 24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST (1 << 10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR (1 << 9)	/* The end of Receive */

#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD (1 << 7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM 0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
#define DDADR_STOP (1 << 0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
#define DCMD_FLOWSRC (1 << 29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG (1 << 28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
#define DCMD_ENDIRQEN (1 << 21)	/* End Interrupt Enable */
#define DCMD_ENDIAN (1 << 18)	/* Device Endian-ness. */
#define DCMD_BURST8 (1 << 16)	/* 8 byte burst */
#define DCMD_BURST16 (2 << 16)	/* 16 byte burst */
#define DCMD_BURST32 (3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1 (1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2 (2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4 (3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH 0x01fff	/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT 3
#define PDMA_MAX_DESC_BYTES DCMD_LENGTH

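/*
 * A single hardware descriptor can move at most PDMA_MAX_DESC_BYTES
 * (8K - 1 bytes), so longer requests are split across a chain of
 * descriptors below. PDMA_ALIGNMENT is reported to dmaengine as
 * copy_align; it is a power-of-two shift, i.e. memcpy buffers are
 * expected to be 8-byte aligned.
 */
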
struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
};

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock;		/* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)

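/*
 * Per-channel register access: each physical channel owns a 16-byte
 * descriptor register window (DDADR/DSADR/DTADR/DCMD, hence "idx << 4")
 * and a 4-byte DCSR slot ("idx << 2").
 */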
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

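/*
 * enable_chan - map the peripheral request line into DRCMR, update the
 * byte-alignment bit for this channel in DALGN, and set DCSR_RUN so the
 * controller starts fetching descriptors.
 */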
static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}

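/*
 * clear_chan_irq - acknowledge a channel interrupt by writing DCSR back to
 * itself; returns 0 when this channel actually raised the interrupt so the
 * caller can schedule the tasklet.
 */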
static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (dint & BIT(phy->idx)) {
		/* clear irq */
		dcsr = readl(phy->base + reg);
		writel(dcsr, phy->base + reg);
		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
		return 0;
	}
	return -EAGAIN;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) == 0) {
		tasklet_schedule(&phy->vchan->tasklet);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != ((i & 0xf) >> 2))
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

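/*
 * mmp_pdma_free_phy - undo the DRCMR mapping and detach the physical channel
 * from its virtual channel under phy_lock, so lookup_phy() can hand it out
 * again.
 */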
static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->phy->vchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still in running, irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

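/*
 * Descriptors are carved out of a per-channel dma_pool so that the hardware
 * sees a bus address (async_tx.phys) it can follow when walking the chain.
 */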
static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each desc has submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  sizeof(struct mmp_pdma_desc_sw),
					  __alignof__(struct mmp_pdma_desc_sw),
					  0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	chan->dir = DMA_MEM_TO_MEM;
	chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
	chan->dcmd |= DCMD_BURST32;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

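/*
 * Like prep_memcpy above, each scatterlist entry is split into descriptors of
 * at most PDMA_MAX_DESC_BYTES, with the device side taken from the slave
 * configuration (chan->dev_addr) and the memory side walking the sg entry.
 */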
static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

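/*
 * DMA_SLAVE_CONFIG translates the generic slave parameters into a DCMD
 * template: direction selects flow control and address increment, the bus
 * width maps to DCMD_WIDTHx and maxburst to DCMD_BURSTx. DMA_TERMINATE_ALL
 * stops the channel and drops every queued descriptor.
 */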
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		mmp_pdma_free_phy(chan);
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		chan->idle = true;
		break;
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;
		}

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

		if (maxburst == 8)
			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		chan->dir = cfg->direction;
		chan->dev_addr = addr;
		/* FIXME: drivers should be ported over to use the filter
		 * function. Once that's done, the following two lines can
		 * be removed.
		 */
		if (cfg->slave_id)
			chan->drcmr = cfg->slave_id;
		break;
	default:
		return -ENOSYS;
	}

	return 0;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_del(&desc->node);
		list_add(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;
			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
			    GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_chan_handler, IRQF_DISABLED,
				       "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

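/*
 * Device-tree translation: the first cell of the DMA specifier is the DRCMR
 * requestor line for the peripheral; any currently unused channel is handed
 * out and bound to that line.
 */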
static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan, *candidate;

retry:
	candidate = NULL;

	/* walk the list of channels registered with the current instance and
	 * find one that is currently unused */
	list_for_each_entry(chan, &d->device.channels, device_node)
		if (chan->client_count == 0) {
			candidate = chan;
			break;
		}

	if (!candidate)
		return NULL;

	/* dma_get_slave_channel will return NULL if we lost a race between
	 * the lookup and the reservation */
	chan = dma_get_slave_channel(candidate);

	if (chan) {
		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
		c->drcmr = dma_spec->args[0];
		return chan;
	}

	goto retry;
}

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;
	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node,
				     "#dma-channels", &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channel */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kzalloc(pdev->dev,
				 dma_channels * sizeof(struct mmp_pdma_chan),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_int_handler, IRQF_DISABLED,
				       "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver = {
		.name = "mmp-pdma",
		.owner = THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table = mmp_pdma_id_table,
	.probe = mmp_pdma_probe,
	.remove = mmp_pdma_remove,
};

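/*
 * Legacy (non device-tree) clients pass a pointer to their DRCMR requestor
 * number through the dmaengine filter interface. A minimal usage sketch,
 * with an illustrative requestor number only:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int drcmr = 21;	/\* example requestor line *\/
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
 */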
bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");