/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>

#include "musb_core.h"
#include "musb_host.h"
/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ... has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */

/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);
/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}
/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh  = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}
static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}
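
/*
 * Illustrative sketch, not part of the driver: on a shared-FIFO endpoint
 * musb_ep_set_qh() aliases both directions, so claiming either direction
 * effectively claims the whole hardware endpoint.  The helper name below
 * is hypothetical.
 */
static inline bool musb_ep_is_claimed_sketch(struct musb_hw_ep *ep)
{
	/* either queue head being non-NULL means the endpoint is in use */
	return ep->in_qh != NULL || ep->out_qh != NULL;
}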
/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}
/* caller owns controller lock, irqs are blocked */
static void
__musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	DBG(({ int level; switch (status) {
				case 0:
					level = 4;
					break;
				/* common/boring faults */
				case -EREMOTEIO:
				case -ESHUTDOWN:
				case -ECONNRESET:
				case -EPIPE:
					level = 3;
					break;
				default:
					level = 2;
					break;
				}; level; }),
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}
/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */
	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
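
/*
 * Illustrative round trip, not driver code: the toggle saved above is read
 * back with usb_gettoggle() when musb_ep_program() rebuilds the CSRs.  The
 * helper name below is hypothetical.
 */
static inline int musb_peek_toggle_sketch(struct urb *urb, u8 epnum, int is_out)
{
	/* returns the toggle bit that musb_save_toggle() stored */
	return usb_gettoggle(urb->dev, epnum, is_out);
}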
/* caller owns controller lock, irqs are blocked */
static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	int			is_in = usb_pipein(urb->pipe);
	int			ready = qh->is_ready;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	__musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}
	return qh;
}
/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, irqs are blocked
 */
static void
musb_advance_schedule(struct musb *musb, struct urb *urb,
		      struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh	*qh = musb_ep_get_qh(hw_ep, is_in);

	if (urb->status == -EINPROGRESS)
		qh = musb_giveback(qh, urb, 0);
	else
		qh = musb_giveback(qh, urb, urb->status);

	if (qh != NULL && qh->is_ready) {
		DBG(4, "... next ep%d %cX urb %p\n",
				hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);

	ep->rx_reinit = 0;
}
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef	CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAMODE
			| MUSB_TXCSR_DMAENAB;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
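
/*
 * A minimal sketch (hypothetical helper, not called by the driver) of the
 * Mentor DMA mode choice made above: mode 1 (multi-packet) whenever the
 * request spans more than one USB packet, else mode 0.
 */
static inline u8 musb_tx_dma_mode_sketch(u32 length, u16 pkt_size)
{
	return (length > pkt_size) ? 1 : 0;
}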
/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;

	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
				qh->segsize = len;

				/* AUTOREQ is in a DMA register */
				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
				csr = musb_readw(hw_ep->regs,
						MUSB_RXCSR);

				/* unless caller treats short rx transfers as
				 * errors, we dare not queue multiple transfers.
				 */
				dma_ok = dma_controller->channel_program(
						dma_channel, packet_sz,
						!(urb->transfer_flags
							& URB_SHORT_NOT_OK),
						urb->transfer_dma + offset,
						qh->segsize);
				if (!dma_ok) {
					dma_controller->channel_release(
							dma_channel);
					hw_ep->rx_channel = NULL;
					dma_channel = NULL;
				} else
					csr |= MUSB_RXCSR_DMAENAB;
			}
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
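
/*
 * Worked example of the can_bulk_split() TXMAXP packing done above
 * (hypothetical helper, illustrative only): with 64-byte packets in a
 * 512-byte TX FIFO the high bits hold (512/64 - 1) = 7, allowing up to
 * 8 packets per combine.
 */
static inline u16 musb_txmaxp_split_sketch(u16 fifo_sz, u16 packet_sz)
{
	return packet_sz | (((fifo_sz / packet_sz) - 1) << 11);
}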
/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);
		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif
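
/*
 * A hedged sketch of the TxPktRdy rule above (hypothetical helper, not
 * driver code): after a Mentor DMA TX completes, software must set
 * TXPKTRDY itself in mode 0 always, and in mode 1 only when the final
 * packet is short.
 */
static inline bool musb_tx_needs_pktrdy_sketch(u8 dma_mode, size_t last_len,
					       u16 maxpacket)
{
	return dma_mode == 0 || (last_len % maxpacket) != 0;
}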
/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		DBG(3, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		DBG(3, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		DBG(6, "TX end=%d device not responding\n", epnum);

		/* NOTE: this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 * That's already done for bulk RX transfers.
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		return;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			DBG(2, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if (usb_pipeisoc(pipe) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length))
			return;
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		DBG(1, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |	else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *	(a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *	(b) termination conditions are: short RX, or buffer full;
 *	(c) fault modes include
 *	    - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *	      (and that endpoint's dma queue stops immediately)
 *	    - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif
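
/*
 * Sketch of termination condition (b) above (hypothetical helper, not
 * driver code): an RX transfer ends on a short packet or once the URB
 * buffer is full; this mirrors the "done" test in musb_host_rx() below.
 */
static inline bool musb_rx_done_sketch(size_t actual, size_t buf_len,
				       size_t pkt_len, u16 maxpacket)
{
	return pkt_len < maxpacket || actual >= buf_len;
}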
/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}
/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		DBG(3, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		DBG(3, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			DBG(6, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_rx_nak_timeout(musb, hw_ep);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			DBG(4, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (status == 0) {
						status = -EOVERFLOW;
						urb->error_count++;
					}
					DBG(2, "** OVERFLOW %d into %d\n",\
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->bDesiredMode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 *	Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				/* REVISIT reset CSR */
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			DBG(6, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
		else
			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;

		if (diff >= 0 && best_diff > diff) {
			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX NAK timeout scheme when bulk requests are
		 * multiplexed.  This scheme doesn't work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
		if (is_in && qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
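
/*
 * Worked example of the NAK timeout encoding chosen above (hypothetical
 * helper, not called anywhere): intv_reg is log2-encoded, so 8 gives
 * 2^(8-1) = 128 microframes (16 ms at high speed) and 4 gives
 * 2^(4-1) = 8 frames (8 ms at full speed).
 */
static inline unsigned musb_nak_timeout_frames_sketch(u8 intv_reg)
{
	return 1U << (intv_reg - 1);
}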
static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);

	/* no high bandwidth support yet */
	if (qh->maxpacket & ~0x7ff) {
		ret = -EMSGSIZE;
		goto done;
	}

	qh->epnum = usb_endpoint_num(epd);
	qh->type = usb_endpoint_type(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
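
/*
 * Illustrative sketch of the interval encodings precomputed above
 * (hypothetical helper, not called by the driver): full/low-speed
 * interrupt endpoints use bInterval as a frame count directly, while
 * high-speed interrupt and all ISO endpoints treat it as a 1..16
 * exponent, i.e. 2^(bInterval-1) (micro)frames.
 */
static inline unsigned musb_interval_frames_sketch(int logarithmic,
						   u8 bInterval)
{
	if (logarithmic)
		return 1U << (min_t(u8, bInterval, 16) - 1);
	return max_t(u8, bInterval, 1);
}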
/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	u16			csr;
	int			status = 0;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			DBG(status ? 1 : 3,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in = usb_pipein(urb->pipe);
	int			ret;

	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		__musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}
static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}
static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
		return 0;

	if (is_host_active(musb) && musb->is_active) {
		WARNING("trying to suspend as %s is_active=%i\n",
			otg_state_string(musb), musb->is_active);
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}
const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};