/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */
/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
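
/*
 * Illustrative example (an addition, not from the original notes): with the
 * hub + keybd + mouse configuration mentioned above, ep0 carries the control
 * traffic, the reserved bulk endpoint serves any bulk queues, and each
 * interrupt IN pipe (hub status, keyboard, mouse) claims one of the
 * remaining hardware endpoints until its URB queue stops being refilled.
 */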

static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);
/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}
/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh  = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}
static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}
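
/*
 * Usage note (illustrative, not from the original source): on a shared-FIFO
 * endpoint, musb_ep_set_qh() above records the qh for both directions, so
 * musb_ep_get_qh(ep, 0) and musb_ep_get_qh(ep, 1) return the same qh; with
 * split TX/RX FIFOs only the requested direction is affected.
 */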

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase =  musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			} s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}
/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}
/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */
	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
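
/*
 * Typical call (illustrative note, not from the original source): fault
 * paths later in this file that also want the data toggle cleared pass
 * MUSB_RXCSR_CLRDATATOG, e.g.:
 *
 *	musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
 */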

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef	CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		if (qh->hb_mult == 1)
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches to main memory before starting
	 * DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
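
/*
 * Worked example (illustrative): on a 512-byte maxpacket bulk OUT, a
 * 3072-byte request takes the Inventra branch above in mode 1
 * (DMAMODE | DMAENAB, plus AUTOSET when hb_mult == 1), while a 100-byte
 * request uses mode 0, where software still sets TXPKTRDY once DMA has
 * loaded the FIFO.
 */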

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok)
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			else if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags
						& URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif
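
/*
 * Note (an added summary, not from the original comment): with the mode
 * split done by musb_tx_dma_program() above, multi-packet URBs run in
 * mode 1, and musb_host_tx() below switches back to mode 0 so a terminal
 * TXPKTRDY interrupt marks the transfer as really complete.
 */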

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);

		/* NOTE: this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 * That's already done for bulk RX transfers.
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		return;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
	musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif
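
/*
 * Illustrative numbers for the usb-storage case above (an added note): a
 * 4096-byte IN transfer on a 512-byte endpoint could run mode 1 for the
 * first seven packets and mode 0 for the last one; the RxPktRdy/ReqPkt
 * races described in musb_host_rx() below are why this driver doesn't
 * attempt that split.
 */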

/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		dev_dbg(musb->controller, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_dbg(musb->controller, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_rx_nak_timeout(musb, hw_ep);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 *	Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_DMAENAB;

			/* autoclear shouldn't be set in high bandwidth */
			if (qh->hb_mult == 1)
				val |= MUSB_RXCSR_AUTOCLEAR;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				/* REVISIT reset CSR */
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			/* Unmap the buffer so that CPU can use it */
			usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;
	u8			toggle;
	u8			txtype;
	struct urb		*urb = next_urb(qh);

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller has a bug in that if we schedule
			 * a BULK Tx transfer on an endpoint that had earlier
			 * handled ISOC then the BULK transfer has to start on
			 * a zero toggle.  If the BULK transfer starts on a 1
			 * toggle then this transfer will fail as the mentor
			 * controller starts the Bulk transfer on a 0 toggle
			 * irrespective of the programming of the toggle bits
			 * in the TXCSR register.  Check for this condition
			 * while allocating the EP for a Tx Bulk transfer.  If
			 * so skip this EP.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX NAK timeout scheme when bulk requests are
		 * multiplexed.  This scheme doesn't work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
		if (is_in && qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}

static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = usb_endpoint_maxp(epd);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
	 * Some musb cores don't support high bandwidth ISO transfers; and
	 * we don't (yet!) support high bandwidth interrupt transfers.
	 */
	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}
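
	/*
	 * Worked example (illustrative, not from the original source): a
	 * high-bandwidth ISO endpoint advertising wMaxPacketSize 0x1400
	 * yields hb_mult = 1 + ((0x1400 >> 11) & 0x03) = 3 transactions per
	 * microframe, and maxpacket & 0x7ff = 1024 bytes per transaction.
	 */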

	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;
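
	/*
	 * Worked example (illustrative, assuming the speed encodings
	 * reconstructed above): a full-speed interrupt endpoint 3 gives
	 * type_reg = (USB_ENDPOINT_XFER_INT << 4) | 3 | 0x80 = 0xb3.
	 */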

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;
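
	/*
	 * Worked example (illustrative): a full-speed interrupt endpoint
	 * with bInterval 10 stores the linear value 10 (frames); a
	 * high-speed one with bInterval 4 stores 4, which the hardware
	 * treats logarithmically as 2^(4-1) = 8 microframes.
	 */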

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}

/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			dev_dbg(musb->controller,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (ep->epnum && is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}

static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in  = usb_pipein(urb->pipe);
	int			ret;

	dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}

static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* ID could be grounded even if there's no device
		 * on the other end of the cable.  NOTE that the
		 * A_WAIT_VRISE timers are messy with MUSB...
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				otg_state_string(musb->xceiv->state));
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}

const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};