#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "musb_core.h"

#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREG_NONE		0
#define EP_MODE_AUTOREG_ALL_NEOP	1
#define EP_MODE_AUTOREG_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

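/*
 * A worked example of the macro above: the per-endpoint RNDIS size
 * registers in the glue register space (written through musb->ctrl_base
 * below) are laid out back to back, so RNDIS_REG(1) = 0x80,
 * RNDIS_REG(2) = 0x84 and RNDIS_REG(15) = 0xb8.
 */
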
struct cppi41_dma_channel {
	struct dma_channel channel;
	struct cppi41_dma_controller *controller;
	struct musb_hw_ep *hw_ep;
	struct dma_chan *dc;
	dma_cookie_t cookie;
	u8 port_num;
	u8 is_tx;
	u8 is_allocated;
	u8 usb_toggle;

	dma_addr_t buf_addr;
	u32 total_len;
	u32 prog_len;
	u32 transferred;
	u32 packet_sz;
	struct list_head tx_check;
	struct work_struct dma_completion;
};

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

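/*
 * The generic struct dma_controller is embedded in the private controller
 * state: the musb core only ever sees the generic part, and the glue code
 * recovers the full structure with container_of() (see
 * cppi41_dma_channel_allocate() and dma_controller_destroy() below).
 */
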
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		dev_dbg(cppi41_channel->controller->musb->controller,
				"Restoring DATA1 toggle.\n");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8 epnum = hw_ep->epnum;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	u16 csr;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static bool is_isoc(struct musb_hw_ep *hw_ep, bool in)
{
	if (in && hw_ep->in_qh) {
		if (hw_ep->in_qh->type == USB_ENDPOINT_XFER_ISOC)
			return true;
	} else if (hw_ep->out_qh) {
		if (hw_ep->out_qh->type == USB_ENDPOINT_XFER_ISOC)
			return true;
	}
	return false;
}

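/*
 * Isochronous endpoints matter here because their completions are not
 * finished in the DMA callback itself: cppi41_dma_callback() defers them
 * to the dma_completion workqueue (see cppi_trans_done_work() below),
 * which may poll and re-schedule itself until the TX FIFO drains.
 */
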
static void cppi41_dma_callback(void *private_data);

static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u16 csr;
		u32 remain_bytes;
		void __iomem *epio = cppi41_channel->hw_ep->regs;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

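/*
 * Note the reload branch above: because of Advisory 1.0.13 an RX transfer
 * is programmed one max-size packet at a time, so each completion advances
 * buf_addr by one packet, queues the next slave transfer and then sets
 * H_REQPKT again so the host controller requests the next packet.
 */
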
static void cppi_trans_done_work(struct work_struct *work)
{
	unsigned long flags;
	struct cppi41_dma_channel *cppi41_channel =
		container_of(work, struct cppi41_dma_channel, dma_completion);
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	bool empty;

	if (!cppi41_channel->is_tx && is_isoc(hw_ep, 1)) {
		spin_lock_irqsave(&musb->lock, flags);
		cppi41_trans_done(cppi41_channel);
		spin_unlock_irqrestore(&musb->lock, flags);
	} else {
		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			spin_lock_irqsave(&musb->lock, flags);
			cppi41_trans_done(cppi41_channel);
			spin_unlock_irqrestore(&musb->lock, flags);
		} else {
			schedule_work(&cppi41_channel->dma_completion);
		}
	}
}

static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx,
				ktime_set(0, 150 * NSEC_PER_USEC));
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
		hw_ep->epnum, cppi41_channel->transferred,
		cppi41_channel->total_len);

	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (!cppi41_channel->is_tx) {
		if (is_isoc(hw_ep, 1))
			schedule_work(&cppi41_channel->dma_completion);
		else
			cppi41_trans_done(cppi41_channel);
		goto out;
	}

	empty = musb_is_tx_fifo_empty(hw_ep);
	if (empty) {
		cppi41_trans_done(cppi41_channel);
	} else {
		struct cppi41_dma_controller *controller;
		/*
		 * On AM335x it has been observed that the TX interrupt fires
		 * too early, i.e. the TX FIFO is not yet empty but the DMA
		 * engine says that it is done with the transfer. We don't
		 * receive a FIFO-empty interrupt, so the only thing we can
		 * do is to poll for the bit. On HS it usually takes 2us, on
		 * FS around 110us - 150us depending on the transfer size.
		 * We spin on HS (no longer than 25us) and set up a timer on
		 * FS to check for the bit and complete the transfer.
		 */
		controller = cppi41_channel->controller;

		if (musb->g.speed == USB_SPEED_HIGH) {
			unsigned wait = 25;

			do {
				empty = musb_is_tx_fifo_empty(hw_ep);
				if (empty)
					break;
				wait--;
				if (!wait)
					break;
				udelay(1);
			} while (1);

			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
		}
		if (is_isoc(hw_ep, 0)) {
			schedule_work(&cppi41_channel->dma_completion);
			goto out;
		}
		list_add_tail(&cppi41_channel->tx_check,
				&controller->early_tx_list);
		if (!hrtimer_is_queued(&controller->early_tx)) {
			hrtimer_start_range_ns(&controller->early_tx,
				ktime_set(0, 140 * NSEC_PER_USEC),
				40 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
		}
	}
out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

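/*
 * Summary of the TX completion paths above: if the FIFO is already empty
 * the transfer completes inline; on high speed the callback busy-waits up
 * to ~25us; isochronous endpoints fall back to the workqueue; everything
 * else is parked on early_tx_list and re-checked by the hrtimer roughly
 * every 140-150us until the FIFO drains.
 */
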
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;

	return old;
}

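/*
 * Each endpoint owns a 2-bit field in the mode registers. For example,
 * programming ep 2 with EP_MODE_DMA_GEN_RNDIS (3) gives shift = 2, clears
 * bits 3:2 of "old" and then sets them to 0b11, leaving every other
 * endpoint's field untouched.
 */
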
static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
				new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
				new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	dev_dbg(musb->controller,
		"configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
		cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
		packet_sz, mode, (unsigned long long) dma_addr,
		len, cppi41_channel->is_tx);

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;

	/*
	 * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
	 * than max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREG_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);

	return true;
}

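/*
 * In generic RNDIS mode the hardware segments a TX transfer larger than
 * one max packet by itself: the total length is written to the endpoint's
 * RNDIS_REG and a single slave transfer covers the whole buffer. RX (and
 * short TX transfers) instead run in transparent mode, one packet per
 * programmed transfer, with the reload handled in cppi41_trans_done().
 */
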
static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

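/*
 * The hb_mult fixup above handles high-bandwidth isochronous endpoints,
 * where wMaxPacketSize encodes up to three transactions per microframe:
 * bits 10:0 hold the base packet size and hb_mult the transaction count,
 * so e.g. hb_mult = 3 with a 1024-byte base gives an effective packet_sz
 * of 3072 bytes.
 */
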
static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
			cppi41_channel->port_num, is_tx);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;	/* TX teardown bits are in the upper half */

	do {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;

	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (!strncmp(str, "tx", 2))
			is_tx = 1;
		else if (!strncmp(str, "rx", 2))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);
		INIT_WORK(&cppi41_channel->dma_completion,
				cppi_trans_done_work);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}

struct dma_controller *dma_controller_create(struct musb *musb,
					void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}