/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licenced under the GPL.
 */

#include <linux/irq.h>
#include <linux/slab.h>
static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
{
	int		last = urb_priv->length - 1;

	if (last >= 0) {
		int		i;
		struct td	*td;

		for (i = 0; i <= last; i++) {
			td = urb_priv->td [i];
			if (td)
				td_free (hc, td);
		}
	}

	list_del (&urb_priv->pending);
	kfree (urb_priv);
}
/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION:  ohci lock held, irqs blocked.
 */
static void
finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
__releases(ohci->lock)
__acquires(ohci->lock)
{
	struct device *dev = ohci_to_hcd(ohci)->self.controller;
	struct usb_host_endpoint *ep = urb->ep;
	struct urb_priv *urb_priv;

	// ASSERT (urb->hcpriv != 0);

 restart:
	urb_free_priv (ohci, urb->hcpriv);
	urb->hcpriv = NULL;
	if (likely(status == -EINPROGRESS))
		status = 0;

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
			if (quirk_amdiso(ohci))
				usb_amd_quirk_pll_enable();
			if (quirk_amdprefetch(ohci))
				sb800_prefetch(dev, 0);
		}
		break;
	case PIPE_INTERRUPT:
		ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
		break;
	}

#ifdef OHCI_VERBOSE_DEBUG
	urb_print(urb, "RET", usb_pipeout (urb->pipe), status);
#endif /* OHCI_VERBOSE_DEBUG */

	/* urb->complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
	spin_unlock (&ohci->lock);
	usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
	spin_lock (&ohci->lock);

	/* stop periodic dma if it's not needed */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}

	/*
	 * An isochronous URB that is submitted too late won't have any TDs
	 * (marked by the fact that the td_cnt value is larger than the
	 * actual number of TDs).  If the next URB on this endpoint is like
	 * that, give it back now.
	 */
	if (!list_empty(&ep->urb_list)) {
		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
		urb_priv = urb->hcpriv;
		if (urb_priv->td_cnt > urb_priv->length) {
			status = 0;
			goto restart;
		}
	}
}
/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/
/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
	int	i, branch = -ENOSPC;

	/* iso periods can be huge; iso tds specify frame numbers */
	if (interval > NUM_INTS)
		interval = NUM_INTS;

	/* search for the least loaded schedule branch of that period
	 * that has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval; i++) {
		if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
			int	j;

			/* usb 1.1 says 90% of one frame */
			for (j = i; j < NUM_INTS; j += interval) {
				if ((ohci->load [j] + load) > 900)
					break;
			}
			if (j < NUM_INTS)
				continue;
			branch = i;
		}
	}
	return branch;
}
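/*
 * For illustration (numbers invented): with interval 8 and load 100,
 * balance() considers branches 0..7; branch i qualifies only if every
 * slot i, i+8, i+16, ... stays within the 900 usec (90% of a frame)
 * budget after adding the 100 usec, and among qualifying branches the
 * least loaded one wins.
 */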
/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
	unsigned	i;

	ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	**prev = &ohci->periodic [i];
		__hc32		*prev_p = &ohci->hcca->int_table [i];
		struct ed	*here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			wmb ();
			*prev = ed;
			*prev_p = cpu_to_hc32(ohci, ed->dma);
			wmb();
		}
		ohci->load [i] += ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
}
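/*
 * For illustration: an interval-2 ED on branch 1 gets linked into
 * slots 1, 3, 5, ... of the 32-entry table, while an interval-32 ED
 * occupies just one slot.  Keeping each branch sorted slow-before-fast
 * puts the high-rate EDs at the shared tails of many slots, matching
 * the tree of figure 3-5 in the OHCI spec.
 */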
/* link an ed into one of the HC chains */

static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
	int	branch;

	ed->state = ED_OPER;
	ed->ed_prev = NULL;
	ed->ed_next = NULL;
	ed->hwNextED = 0;
	if (quirk_zfmicro(ohci)
			&& (ed->type == PIPE_INTERRUPT)
			&& !(ohci->eds_scheduled++))
		mod_timer(&ohci->unlink_watchdog, round_jiffies(jiffies + HZ));
	wmb ();

	/* we care about rm_list when setting CLE/BLE in case the HC was at
	 * work on some TD when CLE/BLE was turned off, and isn't quiesced
	 * yet.  finish_unlinks() restarts as needed, at some upcoming INTR_SF.
	 *
	 * control and bulk EDs are doubly linked (ed_next, ed_prev), but
	 * periodic ones are singly linked (ed_next). that's because the
	 * periodic schedule encodes a tree like figure 3-5 in the ohci
	 * spec:  each qh can have several "previous" nodes, and the tree
	 * doesn't have unused/idle descriptors.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		if (ohci->ed_controltail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
			ohci_writel (ohci, ed->dma,
					&ohci->regs->ed_controlhead);
		} else {
			ohci->ed_controltail->ed_next = ed;
			ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_controltail;
		if (!ohci->ed_controltail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_CLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_controltail = ed;
		break;

	case PIPE_BULK:
		if (ohci->ed_bulktail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
			ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
		} else {
			ohci->ed_bulktail->ed_next = ed;
			ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_bulktail;
		if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_BLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_bulktail = ed;
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		branch = balance (ohci, ed->interval, ed->load);
		if (branch < 0) {
			ohci_dbg (ohci,
				"ERR %d, interval %d msecs, load %d\n",
				branch, ed->interval, ed->load);
			// FIXME if there are TDs queued, fail them!
			return branch;
		}
		ed->branch = branch;
		periodic_link (ohci, ed);
	}

	/* the HC may not see the schedule updates yet, but if it does
	 * then they'll be properly ordered.
	 */
	return 0;
}

/*-------------------------------------------------------------------------*/
/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	int	i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	*temp;
		struct ed	**prev = &ohci->periodic [i];
		__hc32		*prev_p = &ohci->hcca->int_table [i];

		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ohci->load [i] -= ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;

	ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}
/* unlink an ed from one of the HC chains.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't.  ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new.  The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately.  HC should be working on them.
 *
 *  - ED_IDLE: when there's no TD queue. there's no reason for the HC
 *    to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->state = ED_UNLINK;

	/* To deschedule something from the control or bulk list, just
	 * clear CLE/BLE and wait.  There's no safe way to scrub out list
	 * head/current registers until later, and "later" isn't very
	 * tightly specified.  Figure 6-5 and Section 6.4.2.2 show how
	 * the HC is reading the ED queues (while we modify them).
	 *
	 * For now, ed_schedule() is "later".  It might be good paranoia
	 * to scrub those registers in finish_unlinks(), in case of bugs
	 * that make the HC try to use them.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_CLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// an ohci_readl() later syncs CLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_controlhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_controltail == ed) {
			ohci->ed_controltail = ed->ed_prev;
			if (ohci->ed_controltail)
				ohci->ed_controltail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	case PIPE_BULK:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_BLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// an ohci_readl() later syncs BLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_bulkhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_bulktail == ed) {
			ohci->ed_bulktail = ed->ed_prev;
			if (ohci->ed_bulktail)
				ohci->ed_bulktail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		periodic_unlink (ohci, ed);
		break;
	}
}
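/*
 * Rough ED state summary, as implemented here:
 *
 *	ED_IDLE --ed_schedule()--> ED_OPER
 *	ED_OPER --ed_deschedule()--> ED_UNLINK
 *	ED_UNLINK --finish_unlinks()--> ED_IDLE (or straight back to
 *		ED_OPER via ed_schedule() if TDs are still queued)
 */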
/*-------------------------------------------------------------------------*/

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get (
	struct ohci_hcd		*ohci,
	struct usb_host_endpoint *ep,
	struct usb_device	*udev,
	unsigned int		pipe,
	int			interval
) {
	struct ed		*ed;
	unsigned long		flags;

	spin_lock_irqsave (&ohci->lock, flags);

	if (!(ed = ep->hcpriv)) {
		struct td	*td;
		int		is_out;
		u32		info;

		ed = ed_alloc (ohci, GFP_ATOMIC);
		if (!ed) {
			/* out of memory */
			goto done;
		}

		/* dummy td; end of td list for ed */
		td = td_alloc (ohci, GFP_ATOMIC);
		if (!td) {
			/* out of memory */
			ed_free (ohci, ed);
			ed = NULL;
			goto done;
		}
		ed->dummy = td;
		ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
		ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */
		ed->state = ED_IDLE;

		is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);

		/* FIXME usbcore changes dev->devnum before SET_ADDRESS
		 * succeeds ... otherwise we wouldn't need "pipe".
		 */
		info = usb_pipedevice (pipe);
		ed->type = usb_pipetype(pipe);

		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
		info |= usb_endpoint_maxp(&ep->desc) << 16;
		if (udev->speed == USB_SPEED_LOW)
			info |= ED_LOWSPEED;
		/* only control transfers store pids in tds */
		if (ed->type != PIPE_CONTROL) {
			info |= is_out ? ED_OUT : ED_IN;
			if (ed->type != PIPE_BULK) {
				/* periodic transfers... */
				if (ed->type == PIPE_ISOCHRONOUS)
					info |= ED_ISO;
				else if (interval > 32)	/* iso can be bigger */
					interval = 32;
				ed->interval = interval;
				ed->load = usb_calc_bus_time (
					udev->speed, !is_out,
					ed->type == PIPE_ISOCHRONOUS,
					usb_endpoint_maxp(&ep->desc))
						/ 1000;
			}
		}
		ed->hwINFO = cpu_to_hc32(ohci, info);

		ep->hcpriv = ed;
	}

done:
	spin_unlock_irqrestore (&ohci->lock, flags);
	return ed;
}
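/*
 * For reference, the info word assembled above follows the ED dword 0
 * layout from section 4.2 of the OHCI spec: bits 0-6 function address,
 * bits 7-10 endpoint number, bits 11-12 direction (left as "get from
 * TD" for control endpoints), bit 13 low speed, bit 15 iso format,
 * bits 16-26 max packet size.
 */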
/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
	ed_deschedule (ohci, ed);

	/* rm_list is just singly linked, for simplicity */
	ed->ed_next = ohci->ed_rm_list;
	ed->ed_prev = NULL;
	ohci->ed_rm_list = ed;

	/* enable SOF interrupt */
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
	// flush those writes, and get latest HCCA contents
	(void) ohci_readl (ohci, &ohci->regs->control);

	/* SF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave.  frame_no wraps every 2^16 msec, and changes right before
	 * SF is triggered.
	 */
	ed->tick = ohci_frame_no(ohci) + 1;
}
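/*
 * For illustration (assuming tick_before() is the usual signed 16-bit
 * comparison): finish_unlinks() later skips this ED while
 * tick_before(tick, ed->tick) holds, so an unlink recorded at frame
 * 0xffff still sorts "before" frame 0x0001 after the counter wraps.
 */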
/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/
/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

static void
td_fill (struct ohci_hcd *ohci, u32 info,
	dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td		*td, *td_pt;
	struct urb_priv		*urb_priv = urb->hcpriv;
	int			is_iso = info & TD_ISO;
	int			hash;

	// ASSERT (index < urb_priv->length);

	/* aim for only one interrupt per urb.  mostly applies to control
	 * and iso; other urbs rarely need more than one TD per urb.
	 * this way, only final tds (or ones with an error) cause IRQs.
	 * at least immediately; use DI=6 in case any control request is
	 * tempted to die part way through.  (and to force the hc to flush
	 * its donelist soonish, even on unlink paths.)
	 *
	 * NOTE: could delay interrupts even for the last TD, and get fewer
	 * interrupts ... increasing per-urb latency by sharing interrupts.
	 * Drivers that queue bulk urbs may request that behavior.
	 */
	if (index != (urb_priv->length - 1)
			|| (urb->transfer_flags & URB_NO_INTERRUPT))
		info |= TD_DI_SET (6);

	/* use this td as the next dummy */
	td_pt = urb_priv->td [index];

	/* fill the old dummy TD */
	td = urb_priv->td [index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;

	td->hwINFO = cpu_to_hc32 (ohci, info);
	if (is_iso) {
		td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
		*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
						(data & 0x0FFF) | 0xE000);
	} else {
		td->hwCBP = cpu_to_hc32 (ohci, data);
	}
	if (data)
		td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
	else
		td->hwBE = 0;
	td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);

	/* append to queue */
	list_add_tail (&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC (td->td_dma);
	td->td_hash = ohci->td_hash [hash];
	ohci->td_hash [hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb ();
	td->ed->hwTailP = td->hwNextTD;
}
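/*
 * Why the dummy-TD shuffle above is safe: the ED's hwTailP always
 * points at an empty dummy TD, and the HC stops when hwHeadP catches
 * up to hwTailP.  td_fill() writes the new transfer into the old
 * dummy and only then advances hwTailP, so the controller never sees
 * a half-initialized TD; the queue just grows by one.
 */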
/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
	struct ohci_hcd	*ohci,
	struct urb	*urb
) {
	struct urb_priv	*urb_priv = urb->hcpriv;
	struct device *dev = ohci_to_hcd(ohci)->self.controller;
	dma_addr_t	data;
	int		data_len = urb->transfer_buffer_length;
	int		cnt = 0;
	u32		info = 0;
	int		is_out = usb_pipeout (urb->pipe);
	int		periodic = 0;

	/* OHCI handles the bulk/interrupt data toggles itself.  We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */
	if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
		usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
			is_out, 1);
		urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
	}

	list_add (&urb_priv->pending, &ohci->pending);

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {

	/* Bulk and interrupt are identical except for where in the schedule
	 * their EDs live.
	 */
	case PIPE_INTERRUPT:
		/* ... and periodic urbs have extra accounting */
		periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
		/* FALLTHROUGH */
	case PIPE_BULK:
		info = is_out
			? TD_T_TOGGLE | TD_CC | TD_DP_OUT
			: TD_T_TOGGLE | TD_CC | TD_DP_IN;
		/* TDs _could_ transfer up to 8K each */
		while (data_len > 4096) {
			td_fill (ohci, info, data, 4096, urb, cnt);
			data += 4096;
			data_len -= 4096;
			cnt++;
		}
		/* maybe avoid ED halt on final TD short read */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			info |= TD_R;
		td_fill (ohci, info, data, data_len, urb, cnt);
		cnt++;
		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& cnt < urb_priv->length) {
			td_fill (ohci, info, 0, 0, urb, cnt);
			cnt++;
		}
		/* maybe kickstart bulk list */
		if (urb_priv->ed->type == PIPE_BULK) {
			wmb ();
			ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
		td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
		if (data_len > 0) {
			info = TD_CC | TD_R | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE:  mishandles transfers >8K, some >4K */
			td_fill (ohci, info, data, data_len, urb, cnt++);
		}
		info = (is_out || data_len == 0)
			? TD_CC | TD_DP_IN | TD_T_DATA1
			: TD_CC | TD_DP_OUT | TD_T_DATA1;
		td_fill (ohci, info, data, 0, urb, cnt++);
		/* maybe kickstart control list */
		wmb ();
		ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
		break;

	/* ISO has no retransmit, so no toggle; and it uses special TDs.
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		for (cnt = urb_priv->td_cnt; cnt < urb->number_of_packets;
				cnt++) {
			int	frame = urb->start_frame;

			// FIXME scheduling should handle frame counter
			// roll-around ... exotic case (and OHCI has
			// a 2^16 iso range, vs other HCs max of 2^10)
			frame += cnt * urb->interval;
			frame &= 0xffff;
			td_fill (ohci, TD_CC | TD_ISO | frame,
				data + urb->iso_frame_desc [cnt].offset,
				urb->iso_frame_desc [cnt].length, urb, cnt);
		}
		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
			if (quirk_amdiso(ohci))
				usb_amd_quirk_pll_disable();
			if (quirk_amdprefetch(ohci))
				sb800_prefetch(dev, 1);
		}
		periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
		break;
	}

	/* start periodic dma if needed */
	if (periodic) {
		wmb ();
		ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}

	// ASSERT (urb_priv->length == cnt);
}
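/*
 * For illustration: a GET_DESCRIPTOR(DEVICE) control read (8-byte
 * SETUP, 18-byte data stage) becomes three TDs on the control ED:
 * SETUP as DATA0, one IN data TD as DATA1 (with TD_R set, so a short
 * read doesn't halt the ED), and a zero-length OUT status TD as DATA1.
 */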
/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/
/* calculate transfer length/status and update the urb */
static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
	u32	tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
	int	cc = 0;
	int	status = -EINPROGRESS;

	list_del (&td->td_list);

	/* ISO ... drivers see per-TD length/status */
	if (tdINFO & TD_ISO) {
		u16	tdPSW = ohci_hwPSW(ohci, td, 0);
		int	dlen = 0;

		/* NOTE:  assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */

		cc = (tdPSW >> 12) & 0xF;
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return status;

		if (usb_pipeout (urb->pipe))
			dlen = urb->iso_frame_desc [td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}
		urb->actual_length += dlen;
		urb->iso_frame_desc [td->index].actual_length = dlen;
		urb->iso_frame_desc [td->index].status = cc_to_error [cc];

		if (cc != TD_CC_NOERROR)
			ohci_vdbg (ohci,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		int	type = usb_pipetype (urb->pipe);
		u32	tdBE = hc32_to_cpup (ohci, &td->hwBE);

		cc = TD_CC_GET (tdINFO);

		/* update packet status if needed (short is normally ok) */
		if (cc == TD_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;
		if (cc != TD_CC_NOERROR && cc < 0x0E)
			status = cc_to_error [cc];

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
			if (td->hwCBP == 0)
				urb->actual_length += tdBE - td->data_dma + 1;
			else
				urb->actual_length +=
					  hc32_to_cpup (ohci, &td->hwCBP)
					- td->data_dma;
		}

		if (cc != TD_CC_NOERROR && cc < 0x0E)
			ohci_vdbg (ohci,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, 1 + td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}
	return status;
}
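/*
 * Worked example for the length math above: a 512-byte bulk IN that
 * completes fully leaves hwCBP zero, so actual_length grows by
 * tdBE - data_dma + 1 = 512 (BE addresses the last byte).  On a short
 * read the HC leaves CBP just past the last byte stored, so
 * hwCBP - data_dma counts only the bytes actually received.
 */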
/*-------------------------------------------------------------------------*/

static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
{
	struct urb		*urb = td->urb;
	urb_priv_t		*urb_priv = urb->hcpriv;
	struct ed		*ed = td->ed;
	struct list_head	*tmp = td->td_list.next;
	__hc32			toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);

	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);

	/* Get rid of all later tds from this urb.  We don't have
	 * to be careful: no errors and nothing was transferred.
	 * Also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td	*next;

		next = list_entry (tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */

		list_del(&next->td_list);
		urb_priv->td_cnt++;
		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting:  report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol (urb->pipe))
			break;
		/* fallthrough */
	default:
		ohci_dbg (ohci,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			hc32_to_cpu (ohci, td->hwINFO),
			cc, cc_to_error [cc]);
	}
}
/* replies to the request have to be on a FIFO basis so
 * we unreverse the hc-reversed done-list
 */
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
{
	u32		td_dma;
	struct td	*td_rev = NULL;
	struct td	*td = NULL;

	td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
	ohci->hcca->done_head = 0;
	wmb();

	/* get TD from hc's singly linked list, and
	 * prepend to ours.  ed->td_list changes later.
	 */
	while (td_dma) {
		int		cc;

		td = dma_to_td (ohci, td_dma);
		if (!td) {
			ohci_err (ohci, "bad entry %8x\n", td_dma);
			break;
		}

		td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
		cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));

		/* Non-iso endpoints can halt on error; un-halt,
		 * and dequeue any other TDs from this urb.
		 * No other TD could have caused the halt.
		 */
		if (cc != TD_CC_NOERROR
				&& (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
			ed_halted(ohci, td, cc);

		td->next_dl_td = td_rev;
		td_rev = td;
		td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
	}
	return td_rev;
}
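/*
 * For illustration: if the HC retires TDs A, B, C in that order, it
 * prepends each to its donelist, so done_head reaches us as C->B->A.
 * The loop above prepends again while walking, returning A->B->C so
 * completions are given back in FIFO order.
 */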
/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks (struct ohci_hcd *ohci, u16 tick)
{
	struct ed	*ed, **last;

rescan_all:
	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head	*entry, *tmp;
		int			completed, modified;
		__hc32			*prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
			if (tick_before (tick, ed->tick)) {
skip_ed:
				last = &ed->ed_next;
				continue;
			}

			if (!list_empty (&ed->td_list)) {
				struct td	*td;
				u32		head;

				td = list_entry (ed->td_list.next, struct td,
							td_list);
				head = hc32_to_cpu (ohci, ed->hwHeadP) &
								TD_MASK;

				/* INTR_WDH may need to clean up first */
				if (td->td_dma != head) {
					if (ed == ohci->ed_to_check)
						ohci->ed_to_check = NULL;
					else
						goto skip_ed;
				}
			}
		}

		/* reentrancy:  if we drop the schedule lock, someone might
		 * have modified this list.  normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_next;
		ed->ed_next = NULL;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed.  But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe (entry, tmp, &ed->td_list) {
			struct td	*td;
			struct urb	*urb;
			urb_priv_t	*urb_priv;
			__hc32		savebits;
			u32		tdINFO;

			td = list_entry (entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			if (!urb->unlinked) {
				prev = &td->hwNextTD;
				continue;
			}

			/* patch pointer hc uses */
			savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* If this was unlinked, the TD may not have been
			 * retired ... so manually save the data toggle.
			 * The controller ignores the value we save for
			 * control and ISO endpoints.
			 */
			tdINFO = hc32_to_cpup(ohci, &td->hwINFO);
			if ((tdINFO & TD_T) == TD_T_DATA0)
				ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_C);
			else if ((tdINFO & TD_T) == TD_T_DATA1)
				ed->hwHeadP |= cpu_to_hc32(ohci, ED_C);

			/* HC may have partly processed this TD */
			td_done (ohci, urb, td);
			urb_priv->td_cnt++;

			/* if URB is done, clean up */
			if (urb_priv->td_cnt >= urb_priv->length) {
				modified = completed = 1;
				finish_urb(ohci, urb, 0);
			}
		}
		if (completed && !list_empty (&ed->td_list))
			goto rescan_this;

		/* ED's now officially unlinked, hc doesn't see */
		ed->state = ED_IDLE;
		if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
			ohci->eds_scheduled--;
		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
		ed->hwNextED = 0;
		wmb ();
		ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);

		/* but if there's work queued, reschedule */
		if (!list_empty (&ed->td_list)) {
			if (ohci->rh_state == OHCI_RH_RUNNING)
				ed_schedule (ohci, ed);
		}

		if (modified)
			goto rescan_all;
	}

	/* maybe reenable control and bulk lists */
	if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
		u32	command = 0, control = 0;

		if (ohci->ed_controltail) {
			command |= OHCI_CLF;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
				control |= OHCI_CTRL_CLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_controlcurrent);
			}
		}
		if (ohci->ed_bulktail) {
			command |= OHCI_BLF;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
				control |= OHCI_CTRL_BLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_bulkcurrent);
			}
		}

		/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
		if (control) {
			ohci->hc_control |= control;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		if (command) {
			if (quirk_zfmicro(ohci))
				mdelay(1);
			ohci_writel (ohci, command, &ohci->regs->cmdstatus);
		}
	}
}
/*-------------------------------------------------------------------------*/

/*
 * Used to take back a TD from the host controller. This would normally be
 * called from within dl_done_list, however it may be called directly if the
 * HC no longer sees the TD and it has not appeared on the donelist (after
 * two frames).  This bug has been observed on ZF Micro systems.
 */
static void takeback_td(struct ohci_hcd *ohci, struct td *td)
{
	struct urb	*urb = td->urb;
	urb_priv_t	*urb_priv = urb->hcpriv;
	struct ed	*ed = td->ed;
	int		status;

	/* update URB's length and status from TD */
	status = td_done(ohci, urb, td);
	urb_priv->td_cnt++;

	/* If all this urb's TDs are done, call complete() */
	if (urb_priv->td_cnt >= urb_priv->length)
		finish_urb(ohci, urb, status);

	/* clean schedule:  unlink EDs that are no longer busy */
	if (list_empty(&ed->td_list)) {
		if (ed->state == ED_OPER)
			start_ed_unlink(ohci, ed);

	/* ... reenabling halted EDs only after fault cleanup */
	} else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
			== cpu_to_hc32(ohci, ED_SKIP)) {
		td = list_entry(ed->td_list.next, struct td, td_list);
		if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
			ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
			/* ... hc may need waking-up */
			switch (ed->type) {
			case PIPE_CONTROL:
				ohci_writel(ohci, OHCI_CLF,
						&ohci->regs->cmdstatus);
				break;
			case PIPE_BULK:
				ohci_writel(ohci, OHCI_BLF,
						&ohci->regs->cmdstatus);
				break;
			}
		}
	}
}
/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
 * instead of scanning the (re-reversed) donelist as this does.  There's
 * an abnormal path too, handling a quirk in some Compaq silicon:  URBs
 * with TDs that appear to be orphaned are directly reclaimed.
 */
static void
dl_done_list (struct ohci_hcd *ohci)
{
	struct td	*td = dl_reverse_done_list (ohci);

	while (td) {
		struct td	*td_next = td->next_dl_td;
		struct ed	*ed = td->ed;

		/*
		 * Some OHCI controllers (NVIDIA for sure, maybe others)
		 * occasionally forget to add TDs to the done queue.  Since
		 * TDs for a given endpoint are always processed in order,
		 * if we find a TD on the donelist then all of its
		 * predecessors must be finished as well.
		 */
		for (;;) {
			struct td	*td2;

			td2 = list_first_entry(&ed->td_list, struct td,
					td_list);
			if (td2 == td)
				break;
			takeback_td(ohci, td2);
		}

		takeback_td(ohci, td);
		td = td_next;
	}
}