4 * Copyright (C) 2011 Renesas Solutions Corp.
5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 #include <linux/delay.h>
19 #include <linux/scatterlist.h>
/* accessor for the single CFIFO embedded in priv->fifo_info */
#define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))

/* true when @f is the CFIFO of @p */
#define usbhsf_is_cfifo(p, f)	(usbhsf_get_cfifo(p) == f)

/* a fifo is "busy" while a pipe owns it; see usbhs_pipe_select_fifo */
#define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */
31 void usbhs_pkt_init(struct usbhs_pkt
*pkt
)
33 INIT_LIST_HEAD(&pkt
->node
);
37 * packet control function
39 static int usbhsf_null_handle(struct usbhs_pkt
*pkt
, int *is_done
)
41 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pkt
->pipe
);
42 struct device
*dev
= usbhs_priv_to_dev(priv
);
44 dev_err(dev
, "null handler\n");
49 static struct usbhs_pkt_handle usbhsf_null_handler
= {
50 .prepare
= usbhsf_null_handle
,
51 .try_run
= usbhsf_null_handle
,
54 void usbhs_pkt_push(struct usbhs_pipe
*pipe
, struct usbhs_pkt
*pkt
,
55 void (*done
)(struct usbhs_priv
*priv
,
56 struct usbhs_pkt
*pkt
),
57 void *buf
, int len
, int zero
, int sequence
)
59 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
60 struct device
*dev
= usbhs_priv_to_dev(priv
);
64 dev_err(dev
, "no done function\n");
68 /******************** spin lock ********************/
69 usbhs_lock(priv
, flags
);
72 dev_err(dev
, "no handler function\n");
73 pipe
->handler
= &usbhsf_null_handler
;
76 list_move_tail(&pkt
->node
, &pipe
->list
);
79 * each pkt must hold own handler.
80 * because handler might be changed by its situation.
81 * dma handler -> pio handler.
85 pkt
->handler
= pipe
->handler
;
90 pkt
->sequence
= sequence
;
92 usbhs_unlock(priv
, flags
);
93 /******************** spin unlock ******************/
96 static void __usbhsf_pkt_del(struct usbhs_pkt
*pkt
)
98 list_del_init(&pkt
->node
);
101 static struct usbhs_pkt
*__usbhsf_pkt_get(struct usbhs_pipe
*pipe
)
103 if (list_empty(&pipe
->list
))
106 return list_first_entry(&pipe
->list
, struct usbhs_pkt
, node
);
/* forward declarations used by usbhs_pkt_pop() below */
static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo);
static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo);
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					    struct usbhs_pkt *pkt);
/* map/unmap helpers dispatch to the mod-specific dma_map_ctrl callback */
#define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
118 struct usbhs_pkt
*usbhs_pkt_pop(struct usbhs_pipe
*pipe
, struct usbhs_pkt
*pkt
)
120 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
121 struct usbhs_fifo
*fifo
= usbhs_pipe_to_fifo(pipe
);
124 /******************** spin lock ********************/
125 usbhs_lock(priv
, flags
);
127 usbhs_pipe_disable(pipe
);
130 pkt
= __usbhsf_pkt_get(pipe
);
133 struct dma_chan
*chan
= NULL
;
136 chan
= usbhsf_dma_chan_get(fifo
, pkt
);
138 dmaengine_terminate_all(chan
);
139 usbhsf_fifo_clear(pipe
, fifo
);
140 usbhsf_dma_unmap(pkt
);
143 __usbhsf_pkt_del(pkt
);
147 usbhsf_fifo_unselect(pipe
, fifo
);
149 usbhs_unlock(priv
, flags
);
150 /******************** spin unlock ******************/
161 static int usbhsf_pkt_handler(struct usbhs_pipe
*pipe
, int type
)
163 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
164 struct usbhs_pkt
*pkt
;
165 struct device
*dev
= usbhs_priv_to_dev(priv
);
166 int (*func
)(struct usbhs_pkt
*pkt
, int *is_done
);
171 /******************** spin lock ********************/
172 usbhs_lock(priv
, flags
);
174 pkt
= __usbhsf_pkt_get(pipe
);
176 goto __usbhs_pkt_handler_end
;
179 case USBHSF_PKT_PREPARE
:
180 func
= pkt
->handler
->prepare
;
182 case USBHSF_PKT_TRY_RUN
:
183 func
= pkt
->handler
->try_run
;
185 case USBHSF_PKT_DMA_DONE
:
186 func
= pkt
->handler
->dma_done
;
189 dev_err(dev
, "unknown pkt handler\n");
190 goto __usbhs_pkt_handler_end
;
193 ret
= func(pkt
, &is_done
);
196 __usbhsf_pkt_del(pkt
);
198 __usbhs_pkt_handler_end
:
199 usbhs_unlock(priv
, flags
);
200 /******************** spin unlock ******************/
203 pkt
->done(priv
, pkt
);
204 usbhs_pkt_start(pipe
);
210 void usbhs_pkt_start(struct usbhs_pipe
*pipe
)
212 usbhsf_pkt_handler(pipe
, USBHSF_PKT_PREPARE
);
216 * irq enable/disable function
/*
 * Enable/disable the per-pipe BEMP ("empty") or BRDY ("ready") interrupt
 * by updating the cached status mask on the current mod and pushing it
 * to hardware. Implemented as a macro so "status" can name either the
 * irq_bempsts or irq_brdysts member.
 */
#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
#define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
	({								\
		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
		u16 status = (1 << usbhs_pipe_number(pipe));		\
		if (!mod)						\
			return;						\
		if (enable)						\
			mod->status |= status;				\
		else							\
			mod->status &= ~status;				\
		usbhs_irq_callback_update(priv, mod);			\
	})
/* enable/disable the TX-side interrupt appropriate for this pipe type */
static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	/*
	 * And DCP pipe can NOT use "ready interrupt" for "send"
	 * it should use "empty" interrupt.
	 * see
	 *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
	 *
	 * on the other hand, normal pipe can use "ready interrupt" for "send"
	 * even though it is single/double buffer
	 */
	if (usbhs_pipe_is_dcp(pipe))
		usbhsf_irq_empty_ctrl(pipe, enable);
	else
		usbhsf_irq_ready_ctrl(pipe, enable);
}
/* RX always uses the "ready" (BRDY) interrupt, for every pipe type */
static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	usbhsf_irq_ready_ctrl(pipe, enable);
}
259 static void usbhsf_send_terminator(struct usbhs_pipe
*pipe
,
260 struct usbhs_fifo
*fifo
)
262 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
264 usbhs_bset(priv
, fifo
->ctr
, BVAL
, BVAL
);
267 static int usbhsf_fifo_barrier(struct usbhs_priv
*priv
,
268 struct usbhs_fifo
*fifo
)
273 /* The FIFO port is accessible */
274 if (usbhs_read(priv
, fifo
->ctr
) & FRDY
)
283 static void usbhsf_fifo_clear(struct usbhs_pipe
*pipe
,
284 struct usbhs_fifo
*fifo
)
286 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
288 if (!usbhs_pipe_is_dcp(pipe
))
289 usbhsf_fifo_barrier(priv
, fifo
);
291 usbhs_write(priv
, fifo
->ctr
, BCLR
);
294 static int usbhsf_fifo_rcv_len(struct usbhs_priv
*priv
,
295 struct usbhs_fifo
*fifo
)
297 return usbhs_read(priv
, fifo
->ctr
) & DTLN_MASK
;
300 static void usbhsf_fifo_unselect(struct usbhs_pipe
*pipe
,
301 struct usbhs_fifo
*fifo
)
303 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
305 usbhs_pipe_select_fifo(pipe
, NULL
);
306 usbhs_write(priv
, fifo
->sel
, 0);
309 static int usbhsf_fifo_select(struct usbhs_pipe
*pipe
,
310 struct usbhs_fifo
*fifo
,
313 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
314 struct device
*dev
= usbhs_priv_to_dev(priv
);
316 u16 mask
= ((1 << 5) | 0xF); /* mask of ISEL | CURPIPE */
317 u16 base
= usbhs_pipe_number(pipe
); /* CURPIPE */
319 if (usbhs_pipe_is_busy(pipe
) ||
320 usbhsf_fifo_is_busy(fifo
))
323 if (usbhs_pipe_is_dcp(pipe
)) {
324 base
|= (1 == write
) << 5; /* ISEL */
326 if (usbhs_mod_is_host(priv
))
327 usbhs_dcp_dir_for_host(pipe
, write
);
330 /* "base" will be used below */
331 if (usbhs_get_dparam(priv
, has_sudmac
) && !usbhsf_is_cfifo(priv
, fifo
))
332 usbhs_write(priv
, fifo
->sel
, base
);
334 usbhs_write(priv
, fifo
->sel
, base
| MBW_32
);
336 /* check ISEL and CURPIPE value */
338 if (base
== (mask
& usbhs_read(priv
, fifo
->sel
))) {
339 usbhs_pipe_select_fifo(pipe
, fifo
);
345 dev_err(dev
, "fifo select error\n");
353 static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt
*pkt
, int *is_done
)
355 struct usbhs_pipe
*pipe
= pkt
->pipe
;
356 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
357 struct usbhs_fifo
*fifo
= usbhsf_get_cfifo(priv
); /* CFIFO */
358 struct device
*dev
= usbhs_priv_to_dev(priv
);
361 usbhs_pipe_disable(pipe
);
363 ret
= usbhsf_fifo_select(pipe
, fifo
, 1);
365 dev_err(dev
, "%s() faile\n", __func__
);
369 usbhs_pipe_sequence_data1(pipe
); /* DATA1 */
371 usbhsf_fifo_clear(pipe
, fifo
);
372 usbhsf_send_terminator(pipe
, fifo
);
374 usbhsf_fifo_unselect(pipe
, fifo
);
376 usbhsf_tx_irq_ctrl(pipe
, 1);
377 usbhs_pipe_enable(pipe
);
382 static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt
*pkt
, int *is_done
)
384 struct usbhs_pipe
*pipe
= pkt
->pipe
;
385 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
386 struct usbhs_fifo
*fifo
= usbhsf_get_cfifo(priv
); /* CFIFO */
387 struct device
*dev
= usbhs_priv_to_dev(priv
);
390 usbhs_pipe_disable(pipe
);
392 ret
= usbhsf_fifo_select(pipe
, fifo
, 0);
394 dev_err(dev
, "%s() fail\n", __func__
);
398 usbhs_pipe_sequence_data1(pipe
); /* DATA1 */
399 usbhsf_fifo_clear(pipe
, fifo
);
401 usbhsf_fifo_unselect(pipe
, fifo
);
403 usbhsf_rx_irq_ctrl(pipe
, 1);
404 usbhs_pipe_enable(pipe
);
410 static int usbhs_dcp_dir_switch_done(struct usbhs_pkt
*pkt
, int *is_done
)
412 struct usbhs_pipe
*pipe
= pkt
->pipe
;
414 if (pkt
->handler
== &usbhs_dcp_status_stage_in_handler
)
415 usbhsf_tx_irq_ctrl(pipe
, 0);
417 usbhsf_rx_irq_ctrl(pipe
, 0);
419 pkt
->actual
= pkt
->length
;
425 struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler
= {
426 .prepare
= usbhs_dcp_dir_switch_to_write
,
427 .try_run
= usbhs_dcp_dir_switch_done
,
430 struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler
= {
431 .prepare
= usbhs_dcp_dir_switch_to_read
,
432 .try_run
= usbhs_dcp_dir_switch_done
,
436 * DCP data stage (push)
438 static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt
*pkt
, int *is_done
)
440 struct usbhs_pipe
*pipe
= pkt
->pipe
;
442 usbhs_pipe_sequence_data1(pipe
); /* DATA1 */
445 * change handler to PIO push
447 pkt
->handler
= &usbhs_fifo_pio_push_handler
;
449 return pkt
->handler
->prepare(pkt
, is_done
);
452 struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler
= {
453 .prepare
= usbhsf_dcp_data_stage_try_push
,
457 * DCP data stage (pop)
459 static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt
*pkt
,
462 struct usbhs_pipe
*pipe
= pkt
->pipe
;
463 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
464 struct usbhs_fifo
*fifo
= usbhsf_get_cfifo(priv
);
466 if (usbhs_pipe_is_busy(pipe
))
470 * prepare pop for DCP should
471 * - change DCP direction,
475 usbhs_pipe_disable(pipe
);
477 usbhs_pipe_sequence_data1(pipe
); /* DATA1 */
479 usbhsf_fifo_select(pipe
, fifo
, 0);
480 usbhsf_fifo_clear(pipe
, fifo
);
481 usbhsf_fifo_unselect(pipe
, fifo
);
484 * change handler to PIO pop
486 pkt
->handler
= &usbhs_fifo_pio_pop_handler
;
488 return pkt
->handler
->prepare(pkt
, is_done
);
491 struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler
= {
492 .prepare
= usbhsf_dcp_data_stage_prepare_pop
,
498 static int usbhsf_pio_try_push(struct usbhs_pkt
*pkt
, int *is_done
)
500 struct usbhs_pipe
*pipe
= pkt
->pipe
;
501 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
502 struct device
*dev
= usbhs_priv_to_dev(priv
);
503 struct usbhs_fifo
*fifo
= usbhsf_get_cfifo(priv
); /* CFIFO */
504 void __iomem
*addr
= priv
->base
+ fifo
->port
;
506 int maxp
= usbhs_pipe_get_maxpacket(pipe
);
511 usbhs_pipe_data_sequence(pipe
, pkt
->sequence
);
512 pkt
->sequence
= -1; /* -1 sequence will be ignored */
514 usbhs_pipe_set_trans_count_if_bulk(pipe
, pkt
->length
);
516 ret
= usbhsf_fifo_select(pipe
, fifo
, 1);
520 ret
= usbhs_pipe_is_accessible(pipe
);
522 /* inaccessible pipe is not an error */
524 goto usbhs_fifo_write_busy
;
527 ret
= usbhsf_fifo_barrier(priv
, fifo
);
529 goto usbhs_fifo_write_busy
;
531 buf
= pkt
->buf
+ pkt
->actual
;
532 len
= pkt
->length
- pkt
->actual
;
533 len
= min(len
, maxp
);
535 is_short
= total_len
< maxp
;
542 if (len
>= 4 && !((unsigned long)buf
& 0x03)) {
543 iowrite32_rep(addr
, buf
, len
/ 4);
545 buf
+= total_len
- len
;
548 /* the rest operation */
549 for (i
= 0; i
< len
; i
++)
550 iowrite8(buf
[i
], addr
+ (0x03 - (i
& 0x03)));
555 pkt
->actual
+= total_len
;
557 if (pkt
->actual
< pkt
->length
)
558 *is_done
= 0; /* there are remainder data */
560 *is_done
= 1; /* short packet */
562 *is_done
= !pkt
->zero
; /* send zero packet ? */
568 usbhsf_send_terminator(pipe
, fifo
);
570 usbhsf_tx_irq_ctrl(pipe
, !*is_done
);
571 usbhs_pipe_running(pipe
, !*is_done
);
572 usbhs_pipe_enable(pipe
);
574 dev_dbg(dev
, " send %d (%d/ %d/ %d/ %d)\n",
575 usbhs_pipe_number(pipe
),
576 pkt
->length
, pkt
->actual
, *is_done
, pkt
->zero
);
578 usbhsf_fifo_unselect(pipe
, fifo
);
582 usbhs_fifo_write_busy
:
583 usbhsf_fifo_unselect(pipe
, fifo
);
589 usbhsf_tx_irq_ctrl(pipe
, 1);
590 usbhs_pipe_running(pipe
, 1);
595 static int usbhsf_pio_prepare_push(struct usbhs_pkt
*pkt
, int *is_done
)
597 if (usbhs_pipe_is_running(pkt
->pipe
))
600 return usbhsf_pio_try_push(pkt
, is_done
);
603 struct usbhs_pkt_handle usbhs_fifo_pio_push_handler
= {
604 .prepare
= usbhsf_pio_prepare_push
,
605 .try_run
= usbhsf_pio_try_push
,
611 static int usbhsf_prepare_pop(struct usbhs_pkt
*pkt
, int *is_done
)
613 struct usbhs_pipe
*pipe
= pkt
->pipe
;
615 if (usbhs_pipe_is_busy(pipe
))
618 if (usbhs_pipe_is_running(pipe
))
622 * pipe enable to prepare packet receive
624 usbhs_pipe_data_sequence(pipe
, pkt
->sequence
);
625 pkt
->sequence
= -1; /* -1 sequence will be ignored */
627 usbhs_pipe_set_trans_count_if_bulk(pipe
, pkt
->length
);
628 usbhs_pipe_enable(pipe
);
629 usbhs_pipe_running(pipe
, 1);
630 usbhsf_rx_irq_ctrl(pipe
, 1);
635 static int usbhsf_pio_try_pop(struct usbhs_pkt
*pkt
, int *is_done
)
637 struct usbhs_pipe
*pipe
= pkt
->pipe
;
638 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
639 struct device
*dev
= usbhs_priv_to_dev(priv
);
640 struct usbhs_fifo
*fifo
= usbhsf_get_cfifo(priv
); /* CFIFO */
641 void __iomem
*addr
= priv
->base
+ fifo
->port
;
644 int maxp
= usbhs_pipe_get_maxpacket(pipe
);
649 ret
= usbhsf_fifo_select(pipe
, fifo
, 0);
653 ret
= usbhsf_fifo_barrier(priv
, fifo
);
655 goto usbhs_fifo_read_busy
;
657 rcv_len
= usbhsf_fifo_rcv_len(priv
, fifo
);
659 buf
= pkt
->buf
+ pkt
->actual
;
660 len
= pkt
->length
- pkt
->actual
;
661 len
= min(len
, rcv_len
);
665 * update actual length first here to decide disable pipe.
666 * if this pipe keeps BUF status and all data were popped,
667 * then, next interrupt/token will be issued again
669 pkt
->actual
+= total_len
;
671 if ((pkt
->actual
== pkt
->length
) || /* receive all data */
672 (total_len
< maxp
)) { /* short packet */
674 usbhsf_rx_irq_ctrl(pipe
, 0);
675 usbhs_pipe_running(pipe
, 0);
676 usbhs_pipe_disable(pipe
); /* disable pipe first */
680 * Buffer clear if Zero-Length packet
683 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
687 usbhsf_fifo_clear(pipe
, fifo
);
688 goto usbhs_fifo_read_end
;
696 if (len
>= 4 && !((unsigned long)buf
& 0x03)) {
697 ioread32_rep(addr
, buf
, len
/ 4);
699 buf
+= total_len
- len
;
702 /* the rest operation */
703 for (i
= 0; i
< len
; i
++) {
705 data
= ioread32(addr
);
707 buf
[i
] = (data
>> ((i
& 0x03) * 8)) & 0xff;
711 dev_dbg(dev
, " recv %d (%d/ %d/ %d/ %d)\n",
712 usbhs_pipe_number(pipe
),
713 pkt
->length
, pkt
->actual
, *is_done
, pkt
->zero
);
715 usbhs_fifo_read_busy
:
716 usbhsf_fifo_unselect(pipe
, fifo
);
721 struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler
= {
722 .prepare
= usbhsf_prepare_pop
,
723 .try_run
= usbhsf_pio_try_pop
,
727 * DCP ctrol statge handler
729 static int usbhsf_ctrl_stage_end(struct usbhs_pkt
*pkt
, int *is_done
)
731 usbhs_dcp_control_transfer_done(pkt
->pipe
);
738 struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler
= {
739 .prepare
= usbhsf_ctrl_stage_end
,
740 .try_run
= usbhsf_ctrl_stage_end
,
746 static struct dma_chan
*usbhsf_dma_chan_get(struct usbhs_fifo
*fifo
,
747 struct usbhs_pkt
*pkt
)
749 if (&usbhs_fifo_dma_push_handler
== pkt
->handler
)
750 return fifo
->tx_chan
;
752 if (&usbhs_fifo_dma_pop_handler
== pkt
->handler
)
753 return fifo
->rx_chan
;
758 static struct usbhs_fifo
*usbhsf_get_dma_fifo(struct usbhs_priv
*priv
,
759 struct usbhs_pkt
*pkt
)
761 struct usbhs_fifo
*fifo
;
764 usbhs_for_each_dfifo(priv
, fifo
, i
) {
765 if (usbhsf_dma_chan_get(fifo
, pkt
) &&
766 !usbhsf_fifo_is_busy(fifo
))
773 #define usbhsf_dma_start(p, f) __usbhsf_dma_ctrl(p, f, DREQE)
774 #define usbhsf_dma_stop(p, f) __usbhsf_dma_ctrl(p, f, 0)
775 static void __usbhsf_dma_ctrl(struct usbhs_pipe
*pipe
,
776 struct usbhs_fifo
*fifo
,
779 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
781 usbhs_bset(priv
, fifo
->sel
, DREQE
, dreqe
);
784 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt
*pkt
, int map
)
786 struct usbhs_pipe
*pipe
= pkt
->pipe
;
787 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
788 struct usbhs_pipe_info
*info
= usbhs_priv_to_pipeinfo(priv
);
790 return info
->dma_map_ctrl(pkt
, map
);
793 static void usbhsf_dma_complete(void *arg
);
794 static void xfer_work(struct work_struct
*work
)
796 struct usbhs_pkt
*pkt
= container_of(work
, struct usbhs_pkt
, work
);
797 struct usbhs_pipe
*pipe
= pkt
->pipe
;
798 struct usbhs_fifo
*fifo
= usbhs_pipe_to_fifo(pipe
);
799 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
800 struct dma_async_tx_descriptor
*desc
;
801 struct dma_chan
*chan
= usbhsf_dma_chan_get(fifo
, pkt
);
802 struct device
*dev
= usbhs_priv_to_dev(priv
);
803 enum dma_transfer_direction dir
;
805 dir
= usbhs_pipe_is_dir_in(pipe
) ? DMA_DEV_TO_MEM
: DMA_MEM_TO_DEV
;
807 desc
= dmaengine_prep_slave_single(chan
, pkt
->dma
+ pkt
->actual
,
809 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
813 desc
->callback
= usbhsf_dma_complete
;
814 desc
->callback_param
= pipe
;
816 pkt
->cookie
= dmaengine_submit(desc
);
817 if (pkt
->cookie
< 0) {
818 dev_err(dev
, "Failed to submit dma descriptor\n");
822 dev_dbg(dev
, " %s %d (%d/ %d)\n",
823 fifo
->name
, usbhs_pipe_number(pipe
), pkt
->length
, pkt
->zero
);
825 usbhs_pipe_running(pipe
, 1);
826 usbhsf_dma_start(pipe
, fifo
);
827 usbhs_pipe_set_trans_count_if_bulk(pipe
, pkt
->trans
);
828 dma_async_issue_pending(chan
);
829 usbhs_pipe_enable(pipe
);
835 static int usbhsf_dma_prepare_push(struct usbhs_pkt
*pkt
, int *is_done
)
837 struct usbhs_pipe
*pipe
= pkt
->pipe
;
838 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
839 struct usbhs_fifo
*fifo
;
840 int len
= pkt
->length
- pkt
->actual
;
842 uintptr_t align_mask
;
844 if (usbhs_pipe_is_busy(pipe
))
847 /* use PIO if packet is less than pio_dma_border or pipe is DCP */
848 if ((len
< usbhs_get_dparam(priv
, pio_dma_border
)) ||
849 usbhs_pipe_is_dcp(pipe
))
850 goto usbhsf_pio_prepare_push
;
852 /* check data length if this driver don't use USB-DMAC */
853 if (!usbhs_get_dparam(priv
, has_usb_dmac
) && len
& 0x7)
854 goto usbhsf_pio_prepare_push
;
856 /* check buffer alignment */
857 align_mask
= usbhs_get_dparam(priv
, has_usb_dmac
) ?
858 USBHS_USB_DMAC_XFER_SIZE
- 1 : 0x7;
859 if ((uintptr_t)(pkt
->buf
+ pkt
->actual
) & align_mask
)
860 goto usbhsf_pio_prepare_push
;
862 /* return at this time if the pipe is running */
863 if (usbhs_pipe_is_running(pipe
))
866 /* get enable DMA fifo */
867 fifo
= usbhsf_get_dma_fifo(priv
, pkt
);
869 goto usbhsf_pio_prepare_push
;
871 if (usbhsf_dma_map(pkt
) < 0)
872 goto usbhsf_pio_prepare_push
;
874 ret
= usbhsf_fifo_select(pipe
, fifo
, 0);
876 goto usbhsf_pio_prepare_push_unmap
;
880 INIT_WORK(&pkt
->work
, xfer_work
);
881 schedule_work(&pkt
->work
);
885 usbhsf_pio_prepare_push_unmap
:
886 usbhsf_dma_unmap(pkt
);
887 usbhsf_pio_prepare_push
:
889 * change handler to PIO
891 pkt
->handler
= &usbhs_fifo_pio_push_handler
;
893 return pkt
->handler
->prepare(pkt
, is_done
);
896 static int usbhsf_dma_push_done(struct usbhs_pkt
*pkt
, int *is_done
)
898 struct usbhs_pipe
*pipe
= pkt
->pipe
;
899 int is_short
= pkt
->trans
% usbhs_pipe_get_maxpacket(pipe
);
901 pkt
->actual
+= pkt
->trans
;
903 if (pkt
->actual
< pkt
->length
)
904 *is_done
= 0; /* there are remainder data */
906 *is_done
= 1; /* short packet */
908 *is_done
= !pkt
->zero
; /* send zero packet? */
910 usbhs_pipe_running(pipe
, !*is_done
);
912 usbhsf_dma_stop(pipe
, pipe
->fifo
);
913 usbhsf_dma_unmap(pkt
);
914 usbhsf_fifo_unselect(pipe
, pipe
->fifo
);
917 /* change handler to PIO */
918 pkt
->handler
= &usbhs_fifo_pio_push_handler
;
919 return pkt
->handler
->try_run(pkt
, is_done
);
925 struct usbhs_pkt_handle usbhs_fifo_dma_push_handler
= {
926 .prepare
= usbhsf_dma_prepare_push
,
927 .dma_done
= usbhsf_dma_push_done
,
/* non-USB-DMAC pop prepare: identical to the PIO arm-for-receive step */
static int usbhsf_dma_prepare_pop_with_rx_irq(struct usbhs_pkt *pkt,
					      int *is_done)
{
	return usbhsf_prepare_pop(pkt, is_done);
}
940 static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt
*pkt
,
943 struct usbhs_pipe
*pipe
= pkt
->pipe
;
944 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
945 struct usbhs_fifo
*fifo
;
948 if (usbhs_pipe_is_busy(pipe
))
951 /* use PIO if packet is less than pio_dma_border or pipe is DCP */
952 if ((pkt
->length
< usbhs_get_dparam(priv
, pio_dma_border
)) ||
953 usbhs_pipe_is_dcp(pipe
))
954 goto usbhsf_pio_prepare_pop
;
956 fifo
= usbhsf_get_dma_fifo(priv
, pkt
);
958 goto usbhsf_pio_prepare_pop
;
960 if ((uintptr_t)pkt
->buf
& (USBHS_USB_DMAC_XFER_SIZE
- 1))
961 goto usbhsf_pio_prepare_pop
;
963 usbhs_pipe_config_change_bfre(pipe
, 1);
965 ret
= usbhsf_fifo_select(pipe
, fifo
, 0);
967 goto usbhsf_pio_prepare_pop
;
969 if (usbhsf_dma_map(pkt
) < 0)
970 goto usbhsf_pio_prepare_pop_unselect
;
975 * usbhs_fifo_dma_pop_handler :: prepare
976 * enabled irq to come here.
977 * but it is no longer needed for DMA. disable it.
979 usbhsf_rx_irq_ctrl(pipe
, 0);
981 pkt
->trans
= pkt
->length
;
983 INIT_WORK(&pkt
->work
, xfer_work
);
984 schedule_work(&pkt
->work
);
988 usbhsf_pio_prepare_pop_unselect
:
989 usbhsf_fifo_unselect(pipe
, fifo
);
990 usbhsf_pio_prepare_pop
:
993 * change handler to PIO
995 pkt
->handler
= &usbhs_fifo_pio_pop_handler
;
996 usbhs_pipe_config_change_bfre(pipe
, 0);
998 return pkt
->handler
->prepare(pkt
, is_done
);
1001 static int usbhsf_dma_prepare_pop(struct usbhs_pkt
*pkt
, int *is_done
)
1003 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pkt
->pipe
);
1005 if (usbhs_get_dparam(priv
, has_usb_dmac
))
1006 return usbhsf_dma_prepare_pop_with_usb_dmac(pkt
, is_done
);
1008 return usbhsf_dma_prepare_pop_with_rx_irq(pkt
, is_done
);
1011 static int usbhsf_dma_try_pop_with_rx_irq(struct usbhs_pkt
*pkt
, int *is_done
)
1013 struct usbhs_pipe
*pipe
= pkt
->pipe
;
1014 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
1015 struct usbhs_fifo
*fifo
;
1018 if (usbhs_pipe_is_busy(pipe
))
1021 if (usbhs_pipe_is_dcp(pipe
))
1022 goto usbhsf_pio_prepare_pop
;
1024 /* get enable DMA fifo */
1025 fifo
= usbhsf_get_dma_fifo(priv
, pkt
);
1027 goto usbhsf_pio_prepare_pop
;
1029 if ((uintptr_t)(pkt
->buf
+ pkt
->actual
) & 0x7) /* 8byte alignment */
1030 goto usbhsf_pio_prepare_pop
;
1032 ret
= usbhsf_fifo_select(pipe
, fifo
, 0);
1034 goto usbhsf_pio_prepare_pop
;
1036 /* use PIO if packet is less than pio_dma_border */
1037 len
= usbhsf_fifo_rcv_len(priv
, fifo
);
1038 len
= min(pkt
->length
- pkt
->actual
, len
);
1039 if (len
& 0x7) /* 8byte alignment */
1040 goto usbhsf_pio_prepare_pop_unselect
;
1042 if (len
< usbhs_get_dparam(priv
, pio_dma_border
))
1043 goto usbhsf_pio_prepare_pop_unselect
;
1045 ret
= usbhsf_fifo_barrier(priv
, fifo
);
1047 goto usbhsf_pio_prepare_pop_unselect
;
1049 if (usbhsf_dma_map(pkt
) < 0)
1050 goto usbhsf_pio_prepare_pop_unselect
;
1055 * usbhs_fifo_dma_pop_handler :: prepare
1056 * enabled irq to come here.
1057 * but it is no longer needed for DMA. disable it.
1059 usbhsf_rx_irq_ctrl(pipe
, 0);
1063 INIT_WORK(&pkt
->work
, xfer_work
);
1064 schedule_work(&pkt
->work
);
1068 usbhsf_pio_prepare_pop_unselect
:
1069 usbhsf_fifo_unselect(pipe
, fifo
);
1070 usbhsf_pio_prepare_pop
:
1073 * change handler to PIO
1075 pkt
->handler
= &usbhs_fifo_pio_pop_handler
;
1077 return pkt
->handler
->try_run(pkt
, is_done
);
1080 static int usbhsf_dma_try_pop(struct usbhs_pkt
*pkt
, int *is_done
)
1082 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pkt
->pipe
);
1084 BUG_ON(usbhs_get_dparam(priv
, has_usb_dmac
));
1086 return usbhsf_dma_try_pop_with_rx_irq(pkt
, is_done
);
1089 static int usbhsf_dma_pop_done_with_rx_irq(struct usbhs_pkt
*pkt
, int *is_done
)
1091 struct usbhs_pipe
*pipe
= pkt
->pipe
;
1092 int maxp
= usbhs_pipe_get_maxpacket(pipe
);
1094 usbhsf_dma_stop(pipe
, pipe
->fifo
);
1095 usbhsf_dma_unmap(pkt
);
1096 usbhsf_fifo_unselect(pipe
, pipe
->fifo
);
1098 pkt
->actual
+= pkt
->trans
;
1100 if ((pkt
->actual
== pkt
->length
) || /* receive all data */
1101 (pkt
->trans
< maxp
)) { /* short packet */
1103 usbhs_pipe_running(pipe
, 0);
1106 usbhs_pipe_running(pipe
, 0);
1107 usbhsf_prepare_pop(pkt
, is_done
);
1113 static size_t usbhs_dma_calc_received_size(struct usbhs_pkt
*pkt
,
1114 struct dma_chan
*chan
, int dtln
)
1116 struct usbhs_pipe
*pipe
= pkt
->pipe
;
1117 struct dma_tx_state state
;
1118 size_t received_size
;
1119 int maxp
= usbhs_pipe_get_maxpacket(pipe
);
1121 dmaengine_tx_status(chan
, pkt
->cookie
, &state
);
1122 received_size
= pkt
->length
- state
.residue
;
1125 received_size
-= USBHS_USB_DMAC_XFER_SIZE
;
1126 received_size
&= ~(maxp
- 1);
1127 received_size
+= dtln
;
1130 return received_size
;
1133 static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt
*pkt
,
1136 struct usbhs_pipe
*pipe
= pkt
->pipe
;
1137 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
1138 struct usbhs_fifo
*fifo
= usbhs_pipe_to_fifo(pipe
);
1139 struct dma_chan
*chan
= usbhsf_dma_chan_get(fifo
, pkt
);
1143 * Since the driver disables rx_irq in DMA mode, the interrupt handler
1144 * cannot the BRDYSTS. So, the function clears it here because the
1145 * driver may use PIO mode next time.
1147 usbhs_xxxsts_clear(priv
, BRDYSTS
, usbhs_pipe_number(pipe
));
1149 rcv_len
= usbhsf_fifo_rcv_len(priv
, fifo
);
1150 usbhsf_fifo_clear(pipe
, fifo
);
1151 pkt
->actual
= usbhs_dma_calc_received_size(pkt
, chan
, rcv_len
);
1153 usbhsf_dma_stop(pipe
, fifo
);
1154 usbhsf_dma_unmap(pkt
);
1155 usbhsf_fifo_unselect(pipe
, pipe
->fifo
);
1157 /* The driver can assume the rx transaction is always "done" */
1163 static int usbhsf_dma_pop_done(struct usbhs_pkt
*pkt
, int *is_done
)
1165 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pkt
->pipe
);
1167 if (usbhs_get_dparam(priv
, has_usb_dmac
))
1168 return usbhsf_dma_pop_done_with_usb_dmac(pkt
, is_done
);
1170 return usbhsf_dma_pop_done_with_rx_irq(pkt
, is_done
);
1173 struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler
= {
1174 .prepare
= usbhsf_dma_prepare_pop
,
1175 .try_run
= usbhsf_dma_try_pop
,
1176 .dma_done
= usbhsf_dma_pop_done
1182 static bool usbhsf_dma_filter(struct dma_chan
*chan
, void *param
)
1184 struct sh_dmae_slave
*slave
= param
;
1189 * usbhs doesn't recognize id = 0 as valid DMA
1191 if (0 == slave
->shdma_slave
.slave_id
)
1194 chan
->private = slave
;
1199 static void usbhsf_dma_quit(struct usbhs_priv
*priv
, struct usbhs_fifo
*fifo
)
1202 dma_release_channel(fifo
->tx_chan
);
1204 dma_release_channel(fifo
->rx_chan
);
1206 fifo
->tx_chan
= NULL
;
1207 fifo
->rx_chan
= NULL
;
1210 static void usbhsf_dma_init_pdev(struct usbhs_fifo
*fifo
)
1212 dma_cap_mask_t mask
;
1215 dma_cap_set(DMA_SLAVE
, mask
);
1216 fifo
->tx_chan
= dma_request_channel(mask
, usbhsf_dma_filter
,
1220 dma_cap_set(DMA_SLAVE
, mask
);
1221 fifo
->rx_chan
= dma_request_channel(mask
, usbhsf_dma_filter
,
1225 static void usbhsf_dma_init_dt(struct device
*dev
, struct usbhs_fifo
*fifo
,
1230 snprintf(name
, sizeof(name
), "tx%d", channel
);
1231 fifo
->tx_chan
= dma_request_slave_channel_reason(dev
, name
);
1232 if (IS_ERR(fifo
->tx_chan
))
1233 fifo
->tx_chan
= NULL
;
1235 snprintf(name
, sizeof(name
), "rx%d", channel
);
1236 fifo
->rx_chan
= dma_request_slave_channel_reason(dev
, name
);
1237 if (IS_ERR(fifo
->rx_chan
))
1238 fifo
->rx_chan
= NULL
;
1241 static void usbhsf_dma_init(struct usbhs_priv
*priv
, struct usbhs_fifo
*fifo
,
1244 struct device
*dev
= usbhs_priv_to_dev(priv
);
1247 usbhsf_dma_init_dt(dev
, fifo
, channel
);
1249 usbhsf_dma_init_pdev(fifo
);
1251 if (fifo
->tx_chan
|| fifo
->rx_chan
)
1252 dev_dbg(dev
, "enable DMAEngine (%s%s%s)\n",
1254 fifo
->tx_chan
? "[TX]" : " ",
1255 fifo
->rx_chan
? "[RX]" : " ");
1261 static int usbhsf_irq_empty(struct usbhs_priv
*priv
,
1262 struct usbhs_irq_state
*irq_state
)
1264 struct usbhs_pipe
*pipe
;
1265 struct device
*dev
= usbhs_priv_to_dev(priv
);
1268 if (!irq_state
->bempsts
) {
1269 dev_err(dev
, "debug %s !!\n", __func__
);
1273 dev_dbg(dev
, "irq empty [0x%04x]\n", irq_state
->bempsts
);
1276 * search interrupted "pipe"
1279 usbhs_for_each_pipe_with_dcp(pipe
, priv
, i
) {
1280 if (!(irq_state
->bempsts
& (1 << i
)))
1283 ret
= usbhsf_pkt_handler(pipe
, USBHSF_PKT_TRY_RUN
);
1285 dev_err(dev
, "irq_empty run_error %d : %d\n", i
, ret
);
1291 static int usbhsf_irq_ready(struct usbhs_priv
*priv
,
1292 struct usbhs_irq_state
*irq_state
)
1294 struct usbhs_pipe
*pipe
;
1295 struct device
*dev
= usbhs_priv_to_dev(priv
);
1298 if (!irq_state
->brdysts
) {
1299 dev_err(dev
, "debug %s !!\n", __func__
);
1303 dev_dbg(dev
, "irq ready [0x%04x]\n", irq_state
->brdysts
);
1306 * search interrupted "pipe"
1309 usbhs_for_each_pipe_with_dcp(pipe
, priv
, i
) {
1310 if (!(irq_state
->brdysts
& (1 << i
)))
1313 ret
= usbhsf_pkt_handler(pipe
, USBHSF_PKT_TRY_RUN
);
1315 dev_err(dev
, "irq_ready run_error %d : %d\n", i
, ret
);
1321 static void usbhsf_dma_complete(void *arg
)
1323 struct usbhs_pipe
*pipe
= arg
;
1324 struct usbhs_priv
*priv
= usbhs_pipe_to_priv(pipe
);
1325 struct device
*dev
= usbhs_priv_to_dev(priv
);
1328 ret
= usbhsf_pkt_handler(pipe
, USBHSF_PKT_DMA_DONE
);
1330 dev_err(dev
, "dma_complete run_error %d : %d\n",
1331 usbhs_pipe_number(pipe
), ret
);
/*
 * usbhs_fifo_clear_dcp - flush the DCP's CFIFO in both directions.
 * A failed select aborts silently (nothing to clear then).
 */
void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */

	/* clear DCP FIFO of transmission */
	if (usbhsf_fifo_select(pipe, fifo, 1) < 0)
		return;
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	/* clear DCP FIFO of reception */
	if (usbhsf_fifo_select(pipe, fifo, 0) < 0)
		return;
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);
}
1355 void usbhs_fifo_init(struct usbhs_priv
*priv
)
1357 struct usbhs_mod
*mod
= usbhs_mod_get_current(priv
);
1358 struct usbhs_fifo
*cfifo
= usbhsf_get_cfifo(priv
);
1359 struct usbhs_fifo
*dfifo
;
1362 mod
->irq_empty
= usbhsf_irq_empty
;
1363 mod
->irq_ready
= usbhsf_irq_ready
;
1364 mod
->irq_bempsts
= 0;
1365 mod
->irq_brdysts
= 0;
1368 usbhs_for_each_dfifo(priv
, dfifo
, i
)
1372 void usbhs_fifo_quit(struct usbhs_priv
*priv
)
1374 struct usbhs_mod
*mod
= usbhs_mod_get_current(priv
);
1376 mod
->irq_empty
= NULL
;
1377 mod
->irq_ready
= NULL
;
1378 mod
->irq_bempsts
= 0;
1379 mod
->irq_brdysts
= 0;
/*
 * Initialise one DnFIFO: register offsets, DMA slave ids, and DMA
 * channels. The NO_PORT variant is for fifos without a CPU port.
 */
#define __USBHS_DFIFO_INIT(priv, fifo, channel, fifo_port)		\
do {									\
	fifo = usbhsf_get_dnfifo(priv, channel);			\
	fifo->name	= "D"#channel"FIFO";				\
	fifo->port	= fifo_port;					\
	fifo->sel	= D##channel##FIFOSEL;				\
	fifo->ctr	= D##channel##FIFOCTR;				\
	fifo->tx_slave.shdma_slave.slave_id =				\
			usbhs_get_dparam(priv, d##channel##_tx_id);	\
	fifo->rx_slave.shdma_slave.slave_id =				\
			usbhs_get_dparam(priv, d##channel##_rx_id);	\
	usbhsf_dma_init(priv, fifo, channel);				\
} while (0)

#define USBHS_DFIFO_INIT(priv, fifo, channel)				\
	__USBHS_DFIFO_INIT(priv, fifo, channel, D##channel##FIFO)
#define USBHS_DFIFO_INIT_NO_PORT(priv, fifo, channel)			\
	__USBHS_DFIFO_INIT(priv, fifo, channel, 0)
1401 int usbhs_fifo_probe(struct usbhs_priv
*priv
)
1403 struct usbhs_fifo
*fifo
;
1406 fifo
= usbhsf_get_cfifo(priv
);
1407 fifo
->name
= "CFIFO";
1409 fifo
->sel
= CFIFOSEL
;
1410 fifo
->ctr
= CFIFOCTR
;
1413 USBHS_DFIFO_INIT(priv
, fifo
, 0);
1414 USBHS_DFIFO_INIT(priv
, fifo
, 1);
1415 USBHS_DFIFO_INIT_NO_PORT(priv
, fifo
, 2);
1416 USBHS_DFIFO_INIT_NO_PORT(priv
, fifo
, 3);
1421 void usbhs_fifo_remove(struct usbhs_priv
*priv
)
1423 struct usbhs_fifo
*fifo
;
1426 usbhs_for_each_dfifo(priv
, fifo
, i
)
1427 usbhsf_dma_quit(priv
, fifo
);