/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>

#include "musb_core.h"
#include "musb_host.h"
/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */

/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
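
/*
 * Example of that "claimed until drained" policy, from the hub + keybd +
 * mouse note above: each interrupt endpoint grabs one hardware endpoint
 * via musb_schedule() below and keeps it until its URB queue empties, at
 * which point musb_giveback() invalidates the qh and frees the slot.
 */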
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int nOut,
			u8 *buf, u32 len);
/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}
/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
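
/*
 * NOTE on the read-modify-write sequences in the two helpers above:
 * TXCSR mixes control bits with status bits that the core clears when
 * zero is written back, so both helpers OR in MUSB_TXCSR_H_WZC_BITS
 * before writing, e.g.:
 *
 *	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
 *	txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
 *	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
 *
 * which should leave those write-zero-to-clear status bits untouched.
 */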
/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void			*buf;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		hw_ep->out_qh = qh;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf, len);

	/* Configure endpoint */
	if (is_in || hw_ep->is_shared_fifo)
		hw_ep->in_qh = qh;
	else
		hw_ep->out_qh = qh;
	musb_ep_program(musb, epnum, urb, !is_in, buf, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");

		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}
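
/*
 * Call graph sketch: musb_urb_enqueue() -> musb_schedule() calls
 * musb_start_urb() when the chosen hardware endpoint was idle; the irq
 * paths reach it through musb_advance_schedule() and (for multiplexed
 * bulk IN) musb_bulk_rx_nak_timeout() to launch the next queued URB.
 */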
/* caller owns controller lock, irqs are blocked */
static void
__musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	DBG(({ int level; switch (status) {
				case 0:
					level = 4;
					break;
				/* common/boring faults */
				case -EREMOTEIO:
				case -ESHUTDOWN:
				case -ECONNRESET:
				case -EPIPE:
					level = 3;
					break;
				default:
					level = 2;
					break;
				}; level; }),
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}
/* for bulk/interrupt endpoints only */
static inline void
musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
{
	struct usb_device	*udev = urb->dev;
	u16			csr;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*qh;

	/* FIXME:  the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in || ep->is_shared_fifo)
		qh = ep->in_qh;
	else
		qh = ep->out_qh;

	if (!is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		usb_settoggle(udev, qh->epnum, 1,
			(csr & MUSB_TXCSR_H_DATATOGGLE)
				? 1 : 0);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		usb_settoggle(udev, qh->epnum, 0,
			(csr & MUSB_RXCSR_H_DATATOGGLE)
				? 1 : 0);
	}
}
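
/*
 * The toggle saved here via usb_settoggle() is restored the next time
 * the endpoint is programmed: musb_ep_program() reads it back with
 * usb_gettoggle() and writes H_WR_DATATOGGLE/H_DATATOGGLE (or the RX
 * equivalents) into the CSR.
 */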
/* caller owns controller lock, irqs are blocked */
static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	int			is_in = usb_pipein(urb->pipe);
	int			ready = qh->is_ready;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(ep, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	__musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* clobber old pointers to this qh */
		if (is_in || ep->is_shared_fifo)
			ep->in_qh = NULL;
		else
			ep->out_qh = NULL;
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}
	return qh;
}
/*
 * Advance this hardware endpoint's queue, completing the specified urb and
 * advancing to either the next urb queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, irqs are blocked
 */
static void
musb_advance_schedule(struct musb *musb, struct urb *urb,
		struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh	*qh;

	if (is_in || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	if (urb->status == -EINPROGRESS)
		qh = musb_giveback(qh, urb, 0);
	else
		qh = musb_giveback(qh, urb, urb->status);

	if (qh != NULL && qh->is_ready) {
		DBG(4, "... next ep%d %cX urb %p\n",
				hw_ep->epnum, is_in ? 'R' : 'T',
				next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
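
/*
 * Callers pass MUSB_RXCSR_CLRDATATOG when the data toggle should be
 * reset along with the flush (e.g. musb_rx_reinit() below), or 0 to
 * leave the toggle alone (e.g. musb_cleanup_urb(), where giveback has
 * already saved it).
 */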
/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			musb_writew(ep->regs, MUSB_TXCSR,
					MUSB_TXCSR_FRCDATATOG);
		}
		/* clear mode (and everything else) to enable Rx */
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);

	ep->rx_reinit = 0;
}
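
/*
 * NOTE: ep->rx_reinit is set by musb_giveback() when an endpoint's URB
 * list empties, and by musb_bulk_rx_nak_timeout() before rescheduling;
 * it is cleared here once the RX side is reprogrammed for the new qh.
 */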
/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int is_out,
			u8 *buf, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh;
	u16			packet_sz;

	if (!is_out || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	packet_sz = qh->maxpacket;

	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev,
					qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			/* twice in case of double packet buffering */
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma_channel) {

			/* clear previous state */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			qh->segsize = min(len, dma_channel->max_len);

			if (qh->segsize <= packet_sz)
				dma_channel->desired_mode = 0;
			else
				dma_channel->desired_mode = 1;

			if (dma_channel->desired_mode == 0) {
				csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE);
				csr |= (MUSB_TXCSR_DMAENAB);
					/* against programming guide */
			} else
				csr |= (MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_DMAMODE);

			musb_writew(epio, MUSB_TXCSR, csr);

			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					dma_channel->desired_mode,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				if (is_out)
					hw_ep->tx_channel = NULL;
				else
					hw_ep->rx_channel = NULL;
				dma_channel = NULL;
			}
		}
#endif

		/* candidate for DMA */
		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {

			/* program endpoint CSRs first, then setup DMA.
			 * assume CPPI setup succeeds.
			 * defer enabling dma.
			 */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* TX uses "rndis" mode automatically, but needs help
			 * to identify the zero-length-final-packet case.
			 */
			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					(urb->transfer_flags
							& URB_ZERO_PACKET)
						== URB_ZERO_PACKET,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				hw_ep->tx_channel = NULL;
				dma_channel = NULL;

				/* REVISIT there's an error path here that
				 * needs handling:  can't do dma, but
				 * there's no pio buffer address...
				 */
			}
		}

		if (load_count) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_AUTOSET);
			/* write CSR */
			csr |= MUSB_TXCSR_MODE;

			if (epnum)
				musb_writew(epio, MUSB_TXCSR, csr);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
				qh->segsize = len;

				/* AUTOREQ is in a DMA register */
				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
				csr = musb_readw(hw_ep->regs,
						MUSB_RXCSR);

				/* unless caller treats short rx transfers as
				 * errors, we dare not queue multiple transfers.
				 */
				dma_ok = dma_controller->channel_program(
						dma_channel, packet_sz,
						!(urb->transfer_flags
							& URB_SHORT_NOT_OK),
						urb->transfer_dma,
						qh->segsize);
				if (!dma_ok) {
					dma_controller->channel_release(
							dma_channel);
					hw_ep->rx_channel = NULL;
					dma_channel = NULL;
				} else
					csr |= MUSB_RXCSR_DMAENAB;
			}
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
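
/*
 * The ep0 stage machine driven by musb_h_ep0_continue() above:
 *
 *	MUSB_EP0_START --> MUSB_EP0_IN or MUSB_EP0_OUT (data stage;
 *		skipped entirely when the SETUP wLength is zero)
 *	--> MUSB_EP0_STATUS (status stage) --> MUSB_EP0_IDLE
 */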
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif
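
/*
 * Mode choice for Mentor TX DMA, as done in musb_ep_program() above: a
 * request that fits in a single packet uses mode 0, anything larger
 * uses mode 1, roughly:
 *
 *	qh->segsize = min(len, dma_channel->max_len);
 *	dma_channel->desired_mode = (qh->segsize <= packet_sz) ? 0 : 1;
 */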
/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			wLength = 0;
	u8			*buf = NULL;
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
							    : hw_ep->out_qh;
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;

	urb = next_urb(qh);

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		DBG(3, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		DBG(3, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		DBG(6, "TX end=%d device not responding\n", epnum);

		/* NOTE:  this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 * That's already done for bulk RX transfers.
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		return;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			DBG(2, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	/* REVISIT this looks wrong... */
	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			wLength = dma->actual_len;
		else
			wLength = qh->segsize;
		qh->offset += wLength;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = qh->segsize;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				buf = urb->transfer_buffer + d->offset;
				wLength = d->length;
			}
		} else if (dma) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				buf = urb->transfer_buffer
						+ qh->offset;
				wLength = urb->transfer_buffer_length
						- qh->offset;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);

	} else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
		/* WARN_ON(!buf); */

		/* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
		 * (and presumably, fifo is not half-full) we should write TWO
		 * packets before updating TXCSR ... other docs disagree ...
		 */
		/* PIO:  start next packet in this URB */
		if (wLength > qh->maxpacket)
			wLength = qh->maxpacket;
		musb_write_fifo(hw_ep, wLength, buf);
		qh->segsize = wLength;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
	} else
		DBG(1, "not complete, but dma enabled?\n");
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |	else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif
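
/*
 * With Mentor RX DMA in mode 0, musb_host_rx() below treats a non-ISO
 * transfer as complete exactly when the buffer is full or a short
 * packet arrives, per termination condition (b) above:
 *
 *	done = (urb->actual_length + xfer_len >=
 *			urb->transfer_buffer_length
 *		|| dma->actual_len < qh->maxpacket);
 */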
/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(ep, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}
/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		DBG(3, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		DBG(3, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			DBG(6, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_rx_nak_timeout(musb, hw_ep);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			DBG(4, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else  {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (status == 0) {
						status = -EOVERFLOW;
						urb->error_count++;
					}
					DBG(2, "** OVERFLOW %d into %d\n",\
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 *	Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */
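
/* Given the above, mode 1 is gated conservatively (see USE_MODE1 above):
 *	only URB_SHORT_NOT_OK transfers with more than one packet left,
 *	i.e. where
 *
 *		(urb->transfer_buffer_length - urb->actual_length)
 *				> qh->maxpacket
 *
 *	ever request it; everything else stays in mode 0.
 */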
			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				/* REVISIT reset CSR */
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			DBG(6, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (is_in || hw_ep->is_shared_fifo) {
			if (hw_ep->in_qh != NULL)
				continue;
		} else	if (hw_ep->out_qh != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
		else
			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;

		if (diff >= 0 && best_diff > diff) {
			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX NAK timeout scheme when bulk requests are
		 * multiplexed.  This scheme doesn't work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
		if (is_in && qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);

	/* no high bandwidth support yet */
	if (qh->maxpacket & ~0x7ff) {
		ret = -EMSGSIZE;
		goto done;
	}

	qh->epnum = usb_endpoint_num(epd);
	qh->type = usb_endpoint_type(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;
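
	/* Examples of the two interval encodings (per the USB 2.0 spec):
	 * a full speed interrupt endpoint with bInterval 10 polls every
	 * 10 frames (10 ms), while a high speed one with bInterval 4
	 * polls every 2^(4-1) = 8 microframes (1 ms).
	 */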
	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	u16			csr;
	int			status = 0;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			DBG(status ? 1 : 3,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct list_head	*sched;
	unsigned long		flags;
	int			ret;

	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/* Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending dma, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 */
	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
		ret = -EINPROGRESS;
	else {
		switch (qh->type) {
		case USB_ENDPOINT_XFER_CONTROL:
			sched = &musb->control;
			break;
		case USB_ENDPOINT_XFER_BULK:
			if (qh->mux == 1) {
				if (usb_pipein(urb->pipe))
					sched = &musb->in_bulk;
				else
					sched = &musb->out_bulk;
				break;
			}
		default:
			/* REVISIT when we get a schedule tree, periodic
			 * transfers won't always be at the head of a
			 * singleton queue...
			 */
			sched = NULL;
			break;
		}
	}

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
	if (ret < 0 || (sched && qh != first_qh(sched))) {
		int	ready = qh->is_ready;

		ret = 0;
		qh->is_ready = 0;
		__musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			epnum = hep->desc.bEndpointAddress;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	u8			is_in = epnum & USB_DIR_IN;
	struct musb_qh		*qh;
	struct urb		*urb;
	struct list_head	*sched;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		sched = &musb->control;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (qh->mux == 1) {
			if (is_in)
				sched = &musb->in_bulk;
			else
				sched = &musb->out_bulk;
			break;
		}
	default:
		/* REVISIT when we get a schedule tree, periodic transfers
		 * won't always be at the head of a singleton queue...
		 */
		sched = NULL;
		break;
	}

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* kick first urb off the hardware, if needed */
	qh->is_ready = 0;
	if (!sched || qh == first_qh(sched)) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
		return 0;

	if (is_host_active(musb) && musb->is_active) {
		WARNING("trying to suspend as %s is_active=%i\n",
			otg_state_string(musb), musb->is_active);
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}

const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};