#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include "dmaengine.h"

#define DESC_TYPE 27
#define DESC_TYPE_HOST 0x10
#define DESC_TYPE_TEARD 0x13

#define TD_DESC_IS_RX (1 << 16)
#define TD_DESC_DMA_NUM 10

#define DESC_LENGTH_BITS_NUM 21

#define DESC_TYPE_USB (5 << 26)
#define DESC_PD_COMPLETE (1 << 31)

#define DMA_TDFDQ 4
#define DMA_TXGCR(x) (0x800 + (x) * 0x20)
#define DMA_RXGCR(x) (0x808 + (x) * 0x20)
#define RXHPCRA0 4

#define GCR_CHAN_ENABLE (1 << 31)
#define GCR_TEARDOWN (1 << 30)
#define GCR_STARV_RETRY (1 << 24)
#define GCR_DESC_TYPE_HOST (1 << 14)

#define DMA_SCHED_CTRL 0
#define DMA_SCHED_CTRL_EN (1 << 31)
#define DMA_SCHED_WORD(x) ((x) * 4 + 0x800)

#define SCHED_ENTRY0_CHAN(x) ((x) << 0)
#define SCHED_ENTRY0_IS_RX (1 << 7)

#define SCHED_ENTRY1_CHAN(x) ((x) << 8)
#define SCHED_ENTRY1_IS_RX (1 << 15)

#define SCHED_ENTRY2_CHAN(x) ((x) << 16)
#define SCHED_ENTRY2_IS_RX (1 << 23)

#define SCHED_ENTRY3_CHAN(x) ((x) << 24)
#define SCHED_ENTRY3_IS_RX (1 << 31)
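
/*
 * Each 32-bit scheduler word carries four entries; the low bits of every
 * byte-sized entry select the channel and the top bit of the byte
 * (7/15/23/31) marks the entry as RX rather than TX.
 */
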
/* 4 KiB of memory for descriptors, 2 for each endpoint */
#define ALLOC_DECS_NUM 128
#define DESCS_AREAS 1
#define TOTAL_DESCS_NUM (ALLOC_DECS_NUM * DESCS_AREAS)
#define QMGR_SCRATCH_SIZE (TOTAL_DESCS_NUM * 4)

#define QMGR_LRAM0_BASE 0x80
#define QMGR_LRAM_SIZE 0x84
#define QMGR_LRAM1_BASE 0x88
#define QMGR_MEMBASE(x) (0x1000 + (x) * 0x10)
#define QMGR_MEMCTRL(x) (0x1004 + (x) * 0x10)
#define QMGR_MEMCTRL_IDX_SH 16
#define QMGR_MEMCTRL_DESC_SH 8

#define QMGR_NUM_PEND 5
#define QMGR_PEND(x) (0x90 + (x) * 4)

#define QMGR_PENDING_SLOT_Q(x) ((x) / 32)
#define QMGR_PENDING_BIT_Q(x) ((x) % 32)

#define QMGR_QUEUE_A(n) (0x2000 + (n) * 0x10)
#define QMGR_QUEUE_B(n) (0x2004 + (n) * 0x10)
#define QMGR_QUEUE_C(n) (0x2008 + (n) * 0x10)
#define QMGR_QUEUE_D(n) (0x200c + (n) * 0x10)
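
/*
 * Each queue is controlled through four registers; register D is the one
 * used in this driver: writing a descriptor address pushes it onto the
 * queue, reading pops the head descriptor (0 when the queue is empty).
 */
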
/* Glue layer specific */
/* USBSS / USB AM335x */
#define USBSS_IRQ_STATUS 0x28
#define USBSS_IRQ_ENABLER 0x2c
#define USBSS_IRQ_CLEARR 0x30

#define USBSS_IRQ_PD_COMP (1 << 2)

struct cppi41_channel {
	struct dma_chan chan;
	struct dma_async_tx_descriptor txd;
	struct cppi41_dd *cdd;
	struct cppi41_desc *desc;
	dma_addr_t desc_phys;
	void __iomem *gcr_reg;
	int is_tx;
	u32 residue;

	unsigned int q_num;
	unsigned int q_comp_num;
	unsigned int port_num;

	unsigned td_queued:1;
	unsigned td_seen:1;
	unsigned td_desc_seen:1;
	unsigned td_retry;
};

struct cppi41_desc {
	u32 pd0;
	u32 pd1;
	u32 pd2;
	u32 pd3;
	u32 pd4;
	u32 pd5;
	u32 pd6;
	u32 pd7;
} __aligned(32);

struct chan_queues {
	u16 submit;
	u16 complete;
};

struct cppi41_dd {
	struct dma_device ddev;

	void *qmgr_scratch;
	dma_addr_t scratch_phys;

	struct cppi41_desc *cd;
	dma_addr_t descs_phys;
	u32 first_td_desc;
	struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];

	void __iomem *usbss_mem;
	void __iomem *ctrl_mem;
	void __iomem *sched_mem;
	void __iomem *qmgr_mem;
	unsigned int irq;
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;
};

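/*
 * Fixed queue assignment for the AM335x USB subsystem: each (port,
 * direction) pair has one submit queue, onto which prepared descriptors
 * are pushed, and one completion queue, which the interrupt handler polls
 * via the pending registers.
 */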
#define FIST_COMPLETION_QUEUE 93
static const struct chan_queues usb_queues_tx[] = {
	/* USB0 ENDP 1 */
	[ 0] = { .submit = 32, .complete =  93},
	[ 1] = { .submit = 34, .complete =  94},
	[ 2] = { .submit = 36, .complete =  95},
	[ 3] = { .submit = 38, .complete =  96},
	[ 4] = { .submit = 40, .complete =  97},
	[ 5] = { .submit = 42, .complete =  98},
	[ 6] = { .submit = 44, .complete =  99},
	[ 7] = { .submit = 46, .complete = 100},
	[ 8] = { .submit = 48, .complete = 101},
	[ 9] = { .submit = 50, .complete = 102},
	[10] = { .submit = 52, .complete = 103},
	[11] = { .submit = 54, .complete = 104},
	[12] = { .submit = 56, .complete = 105},
	[13] = { .submit = 58, .complete = 106},
	[14] = { .submit = 60, .complete = 107},

	/* USB1 ENDP 1 */
	[15] = { .submit = 62, .complete = 125},
	[16] = { .submit = 64, .complete = 126},
	[17] = { .submit = 66, .complete = 127},
	[18] = { .submit = 68, .complete = 128},
	[19] = { .submit = 70, .complete = 129},
	[20] = { .submit = 72, .complete = 130},
	[21] = { .submit = 74, .complete = 131},
	[22] = { .submit = 76, .complete = 132},
	[23] = { .submit = 78, .complete = 133},
	[24] = { .submit = 80, .complete = 134},
	[25] = { .submit = 82, .complete = 135},
	[26] = { .submit = 84, .complete = 136},
	[27] = { .submit = 86, .complete = 137},
	[28] = { .submit = 88, .complete = 138},
	[29] = { .submit = 90, .complete = 139},
};

static const struct chan_queues usb_queues_rx[] = {
	/* USB0 ENDP 1 */
	[ 0] = { .submit =  1, .complete = 109},
	[ 1] = { .submit =  2, .complete = 110},
	[ 2] = { .submit =  3, .complete = 111},
	[ 3] = { .submit =  4, .complete = 112},
	[ 4] = { .submit =  5, .complete = 113},
	[ 5] = { .submit =  6, .complete = 114},
	[ 6] = { .submit =  7, .complete = 115},
	[ 7] = { .submit =  8, .complete = 116},
	[ 8] = { .submit =  9, .complete = 117},
	[ 9] = { .submit = 10, .complete = 118},
	[10] = { .submit = 11, .complete = 119},
	[11] = { .submit = 12, .complete = 120},
	[12] = { .submit = 13, .complete = 121},
	[13] = { .submit = 14, .complete = 122},
	[14] = { .submit = 15, .complete = 123},

	/* USB1 ENDP 1 */
	[15] = { .submit = 16, .complete = 141},
	[16] = { .submit = 17, .complete = 142},
	[17] = { .submit = 18, .complete = 143},
	[18] = { .submit = 19, .complete = 144},
	[19] = { .submit = 20, .complete = 145},
	[20] = { .submit = 21, .complete = 146},
	[21] = { .submit = 22, .complete = 147},
	[22] = { .submit = 23, .complete = 148},
	[23] = { .submit = 24, .complete = 149},
	[24] = { .submit = 25, .complete = 150},
	[25] = { .submit = 26, .complete = 151},
	[26] = { .submit = 27, .complete = 152},
	[27] = { .submit = 28, .complete = 153},
	[28] = { .submit = 29, .complete = 154},
	[29] = { .submit = 30, .complete = 155},
};

struct cppi_glue_infos {
	irqreturn_t (*isr)(int irq, void *data);
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;
};

static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
{
	return container_of(c, struct cppi41_channel, chan);
}

static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
{
	struct cppi41_channel *c;
	u32 descs_size;
	u32 desc_num;

	descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;

	if (!((desc >= cdd->descs_phys) &&
			(desc < (cdd->descs_phys + descs_size)))) {
		return NULL;
	}
	desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
	BUG_ON(desc_num >= ALLOC_DECS_NUM);
	c = cdd->chan_busy[desc_num];
	cdd->chan_busy[desc_num] = NULL;
	return c;
}

static void cppi_writel(u32 val, void __iomem *mem)
{
	__raw_writel(val, mem);
}

static u32 cppi_readl(void __iomem *mem)
{
	return __raw_readl(mem);
}

static u32 pd_trans_len(u32 val)
{
	return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
}

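/*
 * Completion interrupt handler: acknowledge the packet-completion event,
 * then scan the pending registers starting at the slot that contains the
 * first completion queue (93). Bits belonging to queues below 93 (the
 * submit queues) are masked out; every remaining bit identifies a queue
 * whose head descriptor is popped, mapped back to its channel and
 * completed.
 */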
static irqreturn_t cppi41_irq(int irq, void *data)
{
	struct cppi41_dd *cdd = data;
	struct cppi41_channel *c;
	u32 status;
	int i;

	status = cppi_readl(cdd->usbss_mem + USBSS_IRQ_STATUS);
	if (!(status & USBSS_IRQ_PD_COMP))
		return IRQ_NONE;
	cppi_writel(status, cdd->usbss_mem + USBSS_IRQ_STATUS);

	for (i = QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE); i < QMGR_NUM_PEND;
			i++) {
		u32 val;
		u32 q_num;

		val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
		if (i == QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE) && val) {
			u32 mask;

			/* set the corresponding bit for completion Q 93 */
			mask = 1 << QMGR_PENDING_BIT_Q(FIST_COMPLETION_QUEUE);
			/* now mask has all bits set for queues below Q 93 */
			mask--;
			/* invert and keep only Q 93+ set */
			val &= ~mask;
		}

		while (val) {
			u32 desc;

			q_num = __fls(val);
			val &= ~(1 << q_num);
			q_num += 32 * i;
			desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num));
			desc &= ~0x1f;
			c = desc_to_chan(cdd, desc);
			if (WARN_ON(!c)) {
				pr_err("%s() q %d desc %08x\n", __func__,
						q_num, desc);
				continue;
			}
			c->residue = pd_trans_len(c->desc->pd6) -
					pd_trans_len(c->desc->pd0);

			dma_cookie_complete(&c->txd);
			c->txd.callback(c->txd.callback_param);
		}
	}
	return IRQ_HANDLED;
}

static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);

	return cookie;
}

static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);

	dma_cookie_init(chan);
	dma_async_tx_descriptor_init(&c->txd, chan);
	c->txd.tx_submit = cppi41_tx_submit;

	if (!c->is_tx)
		cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

	return 0;
}

static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
{
}

static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (txstate && ret == DMA_SUCCESS)
		txstate->residue = c->residue;

	return ret;
}

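/*
 * Mark the descriptor's slot busy and push the descriptor onto the
 * channel's submit queue. The (sizeof(struct cppi41_desc) - 24) / 4 value
 * placed in the low bits of the queue D word is the descriptor size
 * encoding used by the queue manager; the pop path strips those bits
 * again with desc &= ~0x1f.
 */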
static void push_desc_queue(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	u32 reg;

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	WARN_ON(cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = c;

	reg = (sizeof(struct cppi41_desc) - 24) / 4;
	reg |= desc_phys;
	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}

static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	u32 reg;

	reg = GCR_CHAN_ENABLE;
	if (!c->is_tx) {
		reg |= GCR_STARV_RETRY;
		reg |= GCR_DESC_TYPE_HOST;
		reg |= c->q_comp_num;
	}

	cppi_writel(reg, c->gcr_reg);

	/*
	 * We don't use writel() but __raw_writel() so we have to make sure
	 * that the DMA descriptor in coherent memory made it to the main
	 * memory before starting the dma engine.
	 */
	__iowmb();
	push_desc_queue(c);
}

static u32 get_host_pd0(u32 length)
{
	u32 reg;

	reg = DESC_TYPE_HOST << DESC_TYPE;
	reg |= length;

	return reg;
}

static u32 get_host_pd1(struct cppi41_channel *c)
{
	u32 reg;

	reg = 0;

	return reg;
}

static u32 get_host_pd2(struct cppi41_channel *c)
{
	u32 reg;

	reg = DESC_TYPE_USB;
	reg |= c->q_comp_num;

	return reg;
}

static u32 get_host_pd3(u32 length)
{
	u32 reg;

	/* PD3 = packet size */
	reg = length;

	return reg;
}

static u32 get_host_pd6(u32 length)
{
	u32 reg;

	/* PD6 buffer size */
	reg = DESC_PD_COMPLETE;
	reg |= length;

	return reg;
}

static u32 get_host_pd4_or_7(u32 addr)
{
	u32 reg;

	reg = addr;

	return reg;
}

static u32 get_host_pd5(void)
{
	u32 reg;

	reg = 0;

	return reg;
}

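/*
 * The eight PD words built by the helpers above form one 32-byte CPPI 4.1
 * host packet descriptor: type and length in PD0, the completion queue in
 * PD2, the packet size in PD3 and the buffer pointer in PD4. PD6/PD7 carry
 * the original buffer size and pointer, which the interrupt handler later
 * uses to compute the residue.
 */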
static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_desc *d;
	struct scatterlist *sg;
	unsigned int i;

	d = c->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		u32 addr;
		u32 len;

		/* We need to use more than one desc once musb supports sg */
		addr = lower_32_bits(sg_dma_address(sg));
		len = sg_dma_len(sg);

		d->pd0 = get_host_pd0(len);
		d->pd1 = get_host_pd1(c);
		d->pd2 = get_host_pd2(c);
		d->pd3 = get_host_pd3(len);
		d->pd4 = get_host_pd4_or_7(addr);
		d->pd5 = get_host_pd5();
		d->pd6 = get_host_pd6(len);
		d->pd7 = get_host_pd4_or_7(addr);

		d++;
	}

	return &c->txd;
}

static int cpp41_cfg_chan(struct cppi41_channel *c,
		struct dma_slave_config *cfg)
{
	return 0;
}

static void cppi41_compute_td_desc(struct cppi41_desc *d)
{
	d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
}

static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
{
	u32 desc;

	desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
	desc &= ~0x1f;
	return desc;
}

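/*
 * Channel teardown: queue a teardown descriptor on the teardown submit
 * queue, set the TD bit in the channel's GCR and poll the completion
 * queues until both the teardown descriptor and the in-flight transfer
 * descriptor have come back. Returns -EAGAIN while this is still in
 * progress, so the caller has to retry.
 */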
static int cppi41_tear_down_chan(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	struct cppi41_desc *td;
	u32 reg;
	u32 desc_phys;
	u32 td_desc_phys;

	td = cdd->cd;
	td += cdd->first_td_desc;

	td_desc_phys = cdd->descs_phys;
	td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);

	if (!c->td_queued) {
		cppi41_compute_td_desc(td);
		__iowmb();

		reg = (sizeof(struct cppi41_desc) - 24) / 4;
		reg |= td_desc_phys;
		cppi_writel(reg, cdd->qmgr_mem +
				QMGR_QUEUE_D(cdd->td_queue.submit));

		reg = GCR_CHAN_ENABLE;
		if (!c->is_tx) {
			reg |= GCR_STARV_RETRY;
			reg |= GCR_DESC_TYPE_HOST;
			reg |= c->q_comp_num;
		}
		reg |= GCR_TEARDOWN;
		cppi_writel(reg, c->gcr_reg);
		c->td_queued = 1;
		c->td_retry = 100;
	}

	if (!c->td_seen) {
		unsigned td_comp_queue;

		if (c->is_tx)
			td_comp_queue = cdd->td_queue.complete;
		else
			td_comp_queue = c->q_comp_num;

		desc_phys = cppi41_pop_desc(cdd, td_comp_queue);
		if (desc_phys) {
			__iormb();

			if (desc_phys == td_desc_phys) {
				u32 pd0;

				pd0 = td->pd0;
				WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
				WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
				WARN_ON((pd0 & 0x1f) != c->port_num);
			} else {
				WARN_ON_ONCE(1);
			}
			c->td_seen = 1;
		}
	}
	if (!c->td_desc_seen) {
		desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
		if (desc_phys) {
			__iormb();
			WARN_ON(c->desc_phys != desc_phys);
			c->td_desc_seen = 1;
		}
	}
	c->td_retry--;
	/*
	 * If the TX descriptor / channel is in use, the caller needs to poke
	 * its TD bit multiple times. After that the hardware releases the
	 * transfer descriptor followed by the TD descriptor. Waiting seems
	 * not to make any difference.
	 * RX seems to be thrown out right away. However once the TearDown
	 * descriptor gets through we are done. If we have seen the transfer
	 * descriptor before the TD we fetch it from enqueue, it has to be
	 * there waiting for us.
	 */
	if (!c->td_seen && c->td_retry)
		return -EAGAIN;

	WARN_ON(!c->td_retry);
	if (!c->td_desc_seen) {
		desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
		WARN_ON(!desc_phys);
	}

	c->td_queued = 0;
	c->td_seen = 0;
	c->td_desc_seen = 0;
	cppi_writel(0, c->gcr_reg);
	return 0;
}

static int cppi41_stop_chan(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	int ret;

	ret = cppi41_tear_down_chan(c);
	if (ret)
		return ret;

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	WARN_ON(!cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = NULL;

	return 0;
}

static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = cppi41_stop_chan(chan);
		break;

	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void cleanup_chans(struct cppi41_dd *cdd)
{
	while (!list_empty(&cdd->ddev.channels)) {
		struct cppi41_channel *cchan;

		cchan = list_first_entry(&cdd->ddev.channels,
				struct cppi41_channel, chan.device_node);
		list_del(&cchan->chan.device_node);
		kfree(cchan);
	}
}

static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
{
	struct cppi41_channel *cchan;
	int i;
	int ret;
	u32 n_chans;

	ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
			&n_chans);
	if (ret)
		return ret;
	/*
	 * Each hardware channel can only be used as TX or as RX, so we
	 * register twice as many DMA channels: one RX and one TX channel
	 * per USB port.
	 */
	n_chans *= 2;

	for (i = 0; i < n_chans; i++) {
		cchan = kzalloc(sizeof(*cchan), GFP_KERNEL);
		if (!cchan)
			goto err;

		cchan->cdd = cdd;
		if (i & 1) {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
			cchan->is_tx = 1;
		} else {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
			cchan->is_tx = 0;
		}
		cchan->port_num = i >> 1;
		cchan->desc = &cdd->cd[i];
		cchan->desc_phys = cdd->descs_phys;
		cchan->desc_phys += i * sizeof(struct cppi41_desc);
		cchan->chan.device = &cdd->ddev;
		list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
	}
	cdd->first_td_desc = n_chans;

	return 0;
err:
	cleanup_chans(cdd);
	return -ENOMEM;
}

static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
{
	unsigned int mem_decs;
	int i;

	mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);

	for (i = 0; i < DESCS_AREAS; i++) {

		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
				cdd->descs_phys);
	}
}

static void disable_sched(struct cppi41_dd *cdd)
{
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}

static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
{
	disable_sched(cdd);

	purge_descs(pdev, cdd);

	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
	dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
			cdd->scratch_phys);
}

static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
{
	unsigned int desc_size;
	unsigned int mem_decs;
	int i;
	u32 reg;
	u32 idx;

	BUILD_BUG_ON(sizeof(struct cppi41_desc) &
			(sizeof(struct cppi41_desc) - 1));
	BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
	BUILD_BUG_ON(ALLOC_DECS_NUM < 32);

	desc_size = sizeof(struct cppi41_desc);
	mem_decs = ALLOC_DECS_NUM * desc_size;

	idx = 0;
	for (i = 0; i < DESCS_AREAS; i++) {

		reg = idx << QMGR_MEMCTRL_IDX_SH;
		reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
		reg |= ilog2(ALLOC_DECS_NUM) - 5;

		BUILD_BUG_ON(DESCS_AREAS != 1);
		cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
				&cdd->descs_phys, GFP_KERNEL);
		if (!cdd->cd)
			return -ENOMEM;

		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		idx += ALLOC_DECS_NUM;
	}
	return 0;
}

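/*
 * Program the scheduler with one TX and one RX entry per channel, four
 * entries packed into each scheduler word, then enable it. DMA_SCHED_CTRL
 * takes the index of the last entry (15 * 2 * 2 - 1) together with the
 * enable bit.
 */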
static void init_sched(struct cppi41_dd *cdd)
{
	unsigned ch;
	unsigned word;
	u32 reg;

	word = 0;
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
	for (ch = 0; ch < 15 * 2; ch += 2) {

		reg = SCHED_ENTRY0_CHAN(ch);
		reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;

		reg |= SCHED_ENTRY2_CHAN(ch + 1);
		reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
		cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
		word++;
	}
	reg = 15 * 2 * 2 - 1;
	reg |= DMA_SCHED_CTRL_EN;
	cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}

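/*
 * Bring the queue manager and DMA blocks up: allocate the linking RAM
 * scratch area, hand the descriptor memory to the queue manager, point
 * the teardown free-descriptor queue (DMA_TDFDQ) at our teardown submit
 * queue and start the scheduler.
 */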
static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
{
	int ret;

	BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
	cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
			&cdd->scratch_phys, GFP_KERNEL);
	if (!cdd->qmgr_scratch)
		return -ENOMEM;

	cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

	ret = init_descs(pdev, cdd);
	if (ret)
		goto err_td;

	cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
	init_sched(cdd);
	return 0;
err_td:
	deinit_cpii41(pdev, cdd);
	return ret;
}

static struct platform_driver cpp41_dma_driver;
/*
 * The param format is:
 * X Y
 * X: Port
 * Y: 0 = RX else TX
 */
#define INFO_PORT 0
#define INFO_IS_TX 1

static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct cppi41_channel *cchan;
	struct cppi41_dd *cdd;
	const struct chan_queues *queues;
	u32 *num = param;

	if (chan->device->dev->driver != &cpp41_dma_driver.driver)
		return false;

	cchan = to_cpp41_chan(chan);

	if (cchan->port_num != num[INFO_PORT])
		return false;

	if (cchan->is_tx && !num[INFO_IS_TX])
		return false;
	cdd = cchan->cdd;
	if (cchan->is_tx)
		queues = cdd->queues_tx;
	else
		queues = cdd->queues_rx;

	BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx));
	if (WARN_ON(cchan->port_num >= ARRAY_SIZE(usb_queues_rx)))
		return false;

	cchan->q_num = queues[cchan->port_num].submit;
	cchan->q_comp_num = queues[cchan->port_num].complete;
	return true;
}

static struct of_dma_filter_info cpp41_dma_info = {
	.filter_fn = cpp41_dma_filter_fn,
};

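/*
 * #dma-cells is 2: cell 0 selects the port, cell 1 the direction
 * (0 = RX, otherwise TX). The two cells are passed straight through to
 * cpp41_dma_filter_fn() as num[INFO_PORT] / num[INFO_IS_TX].
 */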
static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct of_dma_filter_info *info = ofdma->of_dma_data;

	if (!info || !info->filter_fn)
		return NULL;

	if (count != 2)
		return NULL;

	return dma_request_channel(info->dma_cap, info->filter_fn,
			&dma_spec->args[0]);
}

static const struct cppi_glue_infos usb_infos = {
	.isr = cppi41_irq,
	.queues_rx = usb_queues_rx,
	.queues_tx = usb_queues_tx,
	.td_queue = { .submit = 31, .complete = 0 },
};

static const struct of_device_id cppi41_dma_ids[] = {
	{ .compatible = "ti,am3359-cppi41", .data = &usb_infos},
	{},
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);

static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
{
	const struct of_device_id *of_id;

	of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
	if (!of_id)
		return NULL;
	return of_id->data;
}

static int cppi41_dma_probe(struct platform_device *pdev)
{
	struct cppi41_dd *cdd;
	const struct cppi_glue_infos *glue_info;
	int irq;
	int ret;

	glue_info = get_glue_info(pdev);
	if (!glue_info)
		return -EINVAL;

	cdd = kzalloc(sizeof(*cdd), GFP_KERNEL);
	if (!cdd)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
	cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
	cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
	cdd->ddev.device_tx_status = cppi41_dma_tx_status;
	cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
	cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
	cdd->ddev.device_control = cppi41_dma_control;
	cdd->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&cdd->ddev.channels);
	cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;

	cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
	cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
	cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
	cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);

	if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
			!cdd->qmgr_mem) {
		ret = -ENXIO;
		goto err_remap;
	}

	cdd->queues_rx = glue_info->queues_rx;
	cdd->queues_tx = glue_info->queues_tx;
	cdd->td_queue = glue_info->td_queue;

	ret = init_cppi41(pdev, cdd);
	if (ret)
		goto err_init_cppi;

	ret = cppi41_add_chans(pdev, cdd);
	if (ret)
		goto err_chans;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq) {
		ret = -EINVAL;
		goto err_irq;
	}

	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);

	ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
			dev_name(&pdev->dev), cdd);
	if (ret)
		goto err_irq;
	cdd->irq = irq;

	ret = dma_async_device_register(&cdd->ddev);
	if (ret)
		goto err_dma_dev;

	ret = of_dma_controller_register(pdev->dev.of_node,
			cppi41_dma_xlate, &cpp41_dma_info);
	if (ret)
		goto err_of;

	platform_set_drvdata(pdev, cdd);
	return 0;
err_of:
	dma_async_device_unregister(&cdd->ddev);
err_dma_dev:
	free_irq(irq, cdd);
err_irq:
	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	cleanup_chans(cdd);
err_chans:
	deinit_cpii41(pdev, cdd);
err_init_cppi:
err_remap:
	iounmap(cdd->usbss_mem);
	iounmap(cdd->ctrl_mem);
	iounmap(cdd->sched_mem);
	iounmap(cdd->qmgr_mem);
	kfree(cdd);
	return ret;
}

static int cppi41_dma_remove(struct platform_device *pdev)
{
	struct cppi41_dd *cdd = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&cdd->ddev);

	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	free_irq(cdd->irq, cdd);
	cleanup_chans(cdd);
	deinit_cpii41(pdev, cdd);
	iounmap(cdd->usbss_mem);
	iounmap(cdd->ctrl_mem);
	iounmap(cdd->sched_mem);
	iounmap(cdd->qmgr_mem);
	kfree(cdd);
	return 0;
}

static struct platform_driver cpp41_dma_driver = {
	.probe = cppi41_dma_probe,
	.remove = cppi41_dma_remove,
	.driver = {
		.name = "cppi41-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(cppi41_dma_ids),
	},
};

module_platform_driver(cpp41_dma_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");