Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * OHCI HCD (Host Controller Driver) for USB. | |
dd9048af | 3 | * |
1da177e4 LT |
4 | * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> |
5 | * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> | |
dd9048af | 6 | * |
1da177e4 LT |
7 | * This file is licenced under the GPL. |
8 | */ | |
9 | ||
7d12e780 DH |
10 | #include <linux/irq.h> |
11 | ||
1da177e4 LT |
12 | static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv) |
13 | { | |
14 | int last = urb_priv->length - 1; | |
15 | ||
16 | if (last >= 0) { | |
17 | int i; | |
18 | struct td *td; | |
19 | ||
20 | for (i = 0; i <= last; i++) { | |
21 | td = urb_priv->td [i]; | |
22 | if (td) | |
23 | td_free (hc, td); | |
24 | } | |
25 | } | |
26 | ||
27 | list_del (&urb_priv->pending); | |
28 | kfree (urb_priv); | |
29 | } | |
30 | ||
31 | /*-------------------------------------------------------------------------*/ | |
32 | ||
33 | /* | |
34 | * URB goes back to driver, and isn't reissued. | |
35 | * It's completely gone from HC data structures. | |
36 | * PRECONDITION: ohci lock held, irqs blocked. | |
37 | */ | |
38 | static void | |
55d84968 | 39 | finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status) |
1da177e4 LT |
40 | __releases(ohci->lock) |
41 | __acquires(ohci->lock) | |
42 | { | |
43 | // ASSERT (urb->hcpriv != 0); | |
44 | ||
45 | urb_free_priv (ohci, urb->hcpriv); | |
55d84968 AS |
46 | if (likely(status == -EINPROGRESS)) |
47 | status = 0; | |
1da177e4 LT |
48 | |
49 | switch (usb_pipetype (urb->pipe)) { | |
50 | case PIPE_ISOCHRONOUS: | |
51 | ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--; | |
52 | break; | |
53 | case PIPE_INTERRUPT: | |
54 | ohci_to_hcd(ohci)->self.bandwidth_int_reqs--; | |
55 | break; | |
56 | } | |
57 | ||
58 | #ifdef OHCI_VERBOSE_DEBUG | |
55d84968 | 59 | urb_print(urb, "RET", usb_pipeout (urb->pipe), status); |
1da177e4 LT |
60 | #endif |
61 | ||
62 | /* urb->complete() can reenter this HCD */ | |
e9df41c5 | 63 | usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb); |
1da177e4 | 64 | spin_unlock (&ohci->lock); |
4a00027d | 65 | usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status); |
1da177e4 LT |
66 | spin_lock (&ohci->lock); |
67 | ||
68 | /* stop periodic dma if it's not needed */ | |
69 | if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 | |
70 | && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) { | |
71 | ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE); | |
72 | ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); | |
73 | } | |
74 | } | |
75 | ||
76 | ||
77 | /*-------------------------------------------------------------------------* | |
78 | * ED handling functions | |
dd9048af | 79 | *-------------------------------------------------------------------------*/ |
1da177e4 LT |
80 | |
81 | /* search for the right schedule branch to use for a periodic ed. | |
82 | * does some load balancing; returns the branch, or negative errno. | |
83 | */ | |
84 | static int balance (struct ohci_hcd *ohci, int interval, int load) | |
85 | { | |
86 | int i, branch = -ENOSPC; | |
87 | ||
88 | /* iso periods can be huge; iso tds specify frame numbers */ | |
89 | if (interval > NUM_INTS) | |
90 | interval = NUM_INTS; | |
91 | ||
92 | /* search for the least loaded schedule branch of that period | |
93 | * that has enough bandwidth left unreserved. | |
94 | */ | |
95 | for (i = 0; i < interval ; i++) { | |
96 | if (branch < 0 || ohci->load [branch] > ohci->load [i]) { | |
1da177e4 LT |
97 | int j; |
98 | ||
99 | /* usb 1.1 says 90% of one frame */ | |
100 | for (j = i; j < NUM_INTS; j += interval) { | |
101 | if ((ohci->load [j] + load) > 900) | |
102 | break; | |
103 | } | |
104 | if (j < NUM_INTS) | |
105 | continue; | |
dd9048af | 106 | branch = i; |
1da177e4 LT |
107 | } |
108 | } | |
109 | return branch; | |
110 | } | |
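As a standalone sketch of the branch-selection idea in balance() above (not driver code; `pick_branch`, `NUM_SLOTS` and `FRAME_BUDGET_US` are invented stand-ins), the search keeps the least-loaded starting slot whose every visited slot still fits the USB 1.1 budget of 900 us out of each 1 ms frame:

```c
#include <stdio.h>

#define NUM_SLOTS 32		/* stands in for NUM_INTS */
#define FRAME_BUDGET_US 900	/* USB 1.1: 90% of a 1 ms frame */

/* Standalone sketch of the branch-selection idea in balance() above. */
static int pick_branch(const int load[NUM_SLOTS], int interval, int new_load)
{
	int i, branch = -1;

	if (interval > NUM_SLOTS)
		interval = NUM_SLOTS;

	for (i = 0; i < interval; i++) {
		int j;

		/* only look at slots less loaded than the best so far */
		if (branch >= 0 && load[branch] <= load[i])
			continue;

		/* every slot this branch would touch must stay in budget */
		for (j = i; j < NUM_SLOTS; j += interval)
			if (load[j] + new_load > FRAME_BUDGET_US)
				break;
		if (j < NUM_SLOTS)
			continue;
		branch = i;
	}
	return branch;		/* -1: no branch has room */
}

int main(void)
{
	int load[NUM_SLOTS] = { [0] = 850 };

	/* interval 4, 60 us request: slot 0 would exceed 900 us, slot 1 wins */
	printf("branch = %d\n", pick_branch(load, 4, 60));
	return 0;
}
```

The real balance() differs mainly in returning -ENOSPC and reading ohci->load[].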
111 | ||
112 | /*-------------------------------------------------------------------------*/ | |
113 | ||
114 | /* both iso and interrupt requests have periods; this routine puts them | |
115 | * into the schedule tree in the appropriate place. most iso devices use | |
116 | * 1msec periods, but that's not required. | |
117 | */ | |
118 | static void periodic_link (struct ohci_hcd *ohci, struct ed *ed) | |
119 | { | |
120 | unsigned i; | |
121 | ||
122 | ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n", | |
123 | (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "", | |
124 | ed, ed->branch, ed->load, ed->interval); | |
125 | ||
126 | for (i = ed->branch; i < NUM_INTS; i += ed->interval) { | |
127 | struct ed **prev = &ohci->periodic [i]; | |
128 | __hc32 *prev_p = &ohci->hcca->int_table [i]; | |
129 | struct ed *here = *prev; | |
130 | ||
131 | /* sorting each branch by period (slow before fast) | |
132 | * lets us share the faster parts of the tree. | |
133 | * (plus maybe: put interrupt eds before iso) | |
134 | */ | |
135 | while (here && ed != here) { | |
136 | if (ed->interval > here->interval) | |
137 | break; | |
138 | prev = &here->ed_next; | |
139 | prev_p = &here->hwNextED; | |
140 | here = *prev; | |
141 | } | |
142 | if (ed != here) { | |
143 | ed->ed_next = here; | |
144 | if (here) | |
145 | ed->hwNextED = *prev_p; | |
146 | wmb (); | |
147 | *prev = ed; | |
148 | *prev_p = cpu_to_hc32(ohci, ed->dma); | |
149 | wmb(); | |
150 | } | |
151 | ohci->load [i] += ed->load; | |
152 | } | |
153 | ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval; | |
154 | } | |
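The same branch/interval stepping drives periodic_link() and periodic_unlink(); a minimal illustration of which interrupt-table slots one ED occupies, assuming the driver's 32-entry table:

```c
#include <stdio.h>

int main(void)
{
	const int num_ints = 32;	/* size of the periodic table */
	int branch = 2, interval = 8;

	/* same stepping as the loops in periodic_link()/periodic_unlink():
	 * branch 2, interval 8 lands in slots 2, 10, 18 and 26
	 */
	for (int i = branch; i < num_ints; i += interval)
		printf("slot %d\n", i);
	return 0;
}
```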
155 | ||
156 | /* link an ed into one of the HC chains */ | |
157 | ||
158 | static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed) | |
dd9048af | 159 | { |
1da177e4 LT |
160 | int branch; |
161 | ||
162 | if (ohci_to_hcd(ohci)->state == HC_STATE_QUIESCING) | |
163 | return -EAGAIN; | |
164 | ||
165 | ed->state = ED_OPER; | |
166 | ed->ed_prev = NULL; | |
167 | ed->ed_next = NULL; | |
168 | ed->hwNextED = 0; | |
89a0fd18 MN |
169 | if (quirk_zfmicro(ohci) |
170 | && (ed->type == PIPE_INTERRUPT) | |
171 | && !(ohci->eds_scheduled++)) | |
9cebcdc7 | 172 | mod_timer(&ohci->unlink_watchdog, round_jiffies(jiffies + HZ)); |
1da177e4 LT |
173 | wmb (); |
174 | ||
175 | /* we care about rm_list when setting CLE/BLE in case the HC was at | |
176 | * work on some TD when CLE/BLE was turned off, and isn't quiesced | |
177 | * yet. finish_unlinks() restarts as needed at some upcoming INTR_SF. | |
178 | * | |
179 | * control and bulk EDs are doubly linked (ed_next, ed_prev), but | |
180 | * periodic ones are singly linked (ed_next). that's because the | |
181 | * periodic schedule encodes a tree like figure 3-5 in the ohci | |
182 | * spec: each qh can have several "previous" nodes, and the tree | |
183 | * doesn't have unused/idle descriptors. | |
184 | */ | |
185 | switch (ed->type) { | |
186 | case PIPE_CONTROL: | |
187 | if (ohci->ed_controltail == NULL) { | |
188 | WARN_ON (ohci->hc_control & OHCI_CTRL_CLE); | |
189 | ohci_writel (ohci, ed->dma, | |
190 | &ohci->regs->ed_controlhead); | |
191 | } else { | |
192 | ohci->ed_controltail->ed_next = ed; | |
193 | ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci, | |
194 | ed->dma); | |
195 | } | |
196 | ed->ed_prev = ohci->ed_controltail; | |
197 | if (!ohci->ed_controltail && !ohci->ed_rm_list) { | |
198 | wmb(); | |
199 | ohci->hc_control |= OHCI_CTRL_CLE; | |
200 | ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent); | |
201 | ohci_writel (ohci, ohci->hc_control, | |
202 | &ohci->regs->control); | |
203 | } | |
204 | ohci->ed_controltail = ed; | |
205 | break; | |
206 | ||
207 | case PIPE_BULK: | |
208 | if (ohci->ed_bulktail == NULL) { | |
209 | WARN_ON (ohci->hc_control & OHCI_CTRL_BLE); | |
210 | ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead); | |
211 | } else { | |
212 | ohci->ed_bulktail->ed_next = ed; | |
213 | ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci, | |
214 | ed->dma); | |
215 | } | |
216 | ed->ed_prev = ohci->ed_bulktail; | |
217 | if (!ohci->ed_bulktail && !ohci->ed_rm_list) { | |
218 | wmb(); | |
219 | ohci->hc_control |= OHCI_CTRL_BLE; | |
220 | ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent); | |
221 | ohci_writel (ohci, ohci->hc_control, | |
222 | &ohci->regs->control); | |
223 | } | |
224 | ohci->ed_bulktail = ed; | |
225 | break; | |
226 | ||
227 | // case PIPE_INTERRUPT: | |
228 | // case PIPE_ISOCHRONOUS: | |
229 | default: | |
230 | branch = balance (ohci, ed->interval, ed->load); | |
231 | if (branch < 0) { | |
232 | ohci_dbg (ohci, | |
233 | "ERR %d, interval %d msecs, load %d\n", | |
234 | branch, ed->interval, ed->load); | |
235 | // FIXME if there are TDs queued, fail them! | |
236 | return branch; | |
237 | } | |
238 | ed->branch = branch; | |
239 | periodic_link (ohci, ed); | |
dd9048af | 240 | } |
1da177e4 LT |
241 | |
242 | /* the HC may not see the schedule updates yet, but if it does | |
243 | * then they'll be properly ordered. | |
244 | */ | |
245 | return 0; | |
246 | } | |
247 | ||
248 | /*-------------------------------------------------------------------------*/ | |
249 | ||
250 | /* scan the periodic table to find and unlink this ED */ | |
251 | static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed) | |
252 | { | |
253 | int i; | |
254 | ||
255 | for (i = ed->branch; i < NUM_INTS; i += ed->interval) { | |
256 | struct ed *temp; | |
257 | struct ed **prev = &ohci->periodic [i]; | |
258 | __hc32 *prev_p = &ohci->hcca->int_table [i]; | |
259 | ||
260 | while (*prev && (temp = *prev) != ed) { | |
261 | prev_p = &temp->hwNextED; | |
262 | prev = &temp->ed_next; | |
263 | } | |
264 | if (*prev) { | |
265 | *prev_p = ed->hwNextED; | |
266 | *prev = ed->ed_next; | |
267 | } | |
268 | ohci->load [i] -= ed->load; | |
dd9048af | 269 | } |
1da177e4 LT |
270 | ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval; |
271 | ||
272 | ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n", | |
273 | (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "", | |
274 | ed, ed->branch, ed->load, ed->interval); | |
275 | } | |
276 | ||
dd9048af | 277 | /* unlink an ed from one of the HC chains. |
1da177e4 LT |
278 | * just the link to the ed is unlinked. |
279 | * the link from the ed still points to another operational ed or 0 | |
280 | * so the HC can eventually finish the processing of the unlinked ed | |
281 | * (assuming it already started that, which needn't be true). | |
282 | * | |
283 | * ED_UNLINK is a transient state: the HC may still see this ED, but soon | |
284 | * it won't. ED_SKIP means the HC will finish its current transaction, | |
285 | * but won't start anything new. The TD queue may still grow; device | |
286 | * drivers don't know about this HCD-internal state. | |
287 | * | |
288 | * When the HC can't see the ED, something changes ED_UNLINK to one of: | |
289 | * | |
290 | * - ED_OPER: when there's any request queued, the ED gets rescheduled | |
291 | * immediately. HC should be working on them. | |
292 | * | |
293 | * - ED_IDLE: when there's no TD queue. there's no reason for the HC | |
294 | * to care about this ED; safe to disable the endpoint. | |
295 | * | |
296 | * When finish_unlinks() runs later, after SOF interrupt, it will often | |
297 | * complete one or more URB unlinks before making that state change. | |
298 | */ | |
dd9048af | 299 | static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed) |
1da177e4 LT |
300 | { |
301 | ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP); | |
302 | wmb (); | |
303 | ed->state = ED_UNLINK; | |
304 | ||
305 | /* To deschedule something from the control or bulk list, just | |
306 | * clear CLE/BLE and wait. There's no safe way to scrub out list | |
307 | * head/current registers until later, and "later" isn't very | |
308 | * tightly specified. Figure 6-5 and Section 6.4.2.2 show how | |
309 | * the HC is reading the ED queues (while we modify them). | |
310 | * | |
311 | * For now, ed_schedule() is "later". It might be good paranoia | |
312 | * to scrub those registers in finish_unlinks(), in case of bugs | |
313 | * that make the HC try to use them. | |
314 | */ | |
315 | switch (ed->type) { | |
316 | case PIPE_CONTROL: | |
317 | /* remove ED from the HC's list: */ | |
318 | if (ed->ed_prev == NULL) { | |
319 | if (!ed->hwNextED) { | |
320 | ohci->hc_control &= ~OHCI_CTRL_CLE; | |
321 | ohci_writel (ohci, ohci->hc_control, | |
322 | &ohci->regs->control); | |
323 | // an ohci_readl() later syncs CLE with the HC | |
324 | } else | |
325 | ohci_writel (ohci, | |
326 | hc32_to_cpup (ohci, &ed->hwNextED), | |
327 | &ohci->regs->ed_controlhead); | |
328 | } else { | |
329 | ed->ed_prev->ed_next = ed->ed_next; | |
330 | ed->ed_prev->hwNextED = ed->hwNextED; | |
331 | } | |
332 | /* remove ED from the HCD's list: */ | |
333 | if (ohci->ed_controltail == ed) { | |
334 | ohci->ed_controltail = ed->ed_prev; | |
335 | if (ohci->ed_controltail) | |
336 | ohci->ed_controltail->ed_next = NULL; | |
337 | } else if (ed->ed_next) { | |
338 | ed->ed_next->ed_prev = ed->ed_prev; | |
339 | } | |
340 | break; | |
341 | ||
342 | case PIPE_BULK: | |
343 | /* remove ED from the HC's list: */ | |
344 | if (ed->ed_prev == NULL) { | |
345 | if (!ed->hwNextED) { | |
346 | ohci->hc_control &= ~OHCI_CTRL_BLE; | |
347 | ohci_writel (ohci, ohci->hc_control, | |
348 | &ohci->regs->control); | |
349 | // an ohci_readl() later syncs BLE with the HC | |
350 | } else | |
351 | ohci_writel (ohci, | |
352 | hc32_to_cpup (ohci, &ed->hwNextED), | |
353 | &ohci->regs->ed_bulkhead); | |
354 | } else { | |
355 | ed->ed_prev->ed_next = ed->ed_next; | |
356 | ed->ed_prev->hwNextED = ed->hwNextED; | |
357 | } | |
358 | /* remove ED from the HCD's list: */ | |
359 | if (ohci->ed_bulktail == ed) { | |
360 | ohci->ed_bulktail = ed->ed_prev; | |
361 | if (ohci->ed_bulktail) | |
362 | ohci->ed_bulktail->ed_next = NULL; | |
363 | } else if (ed->ed_next) { | |
364 | ed->ed_next->ed_prev = ed->ed_prev; | |
365 | } | |
366 | break; | |
367 | ||
368 | // case PIPE_INTERRUPT: | |
369 | // case PIPE_ISOCHRONOUS: | |
370 | default: | |
371 | periodic_unlink (ohci, ed); | |
372 | break; | |
373 | } | |
374 | } | |
375 | ||
376 | ||
377 | /*-------------------------------------------------------------------------*/ | |
378 | ||
379 | /* get and maybe (re)init an endpoint. init _should_ be done only as part | |
380 | * of enumeration, usb_set_configuration() or usb_set_interface(). | |
381 | */ | |
382 | static struct ed *ed_get ( | |
383 | struct ohci_hcd *ohci, | |
384 | struct usb_host_endpoint *ep, | |
385 | struct usb_device *udev, | |
386 | unsigned int pipe, | |
387 | int interval | |
388 | ) { | |
dd9048af | 389 | struct ed *ed; |
1da177e4 LT |
390 | unsigned long flags; |
391 | ||
392 | spin_lock_irqsave (&ohci->lock, flags); | |
393 | ||
394 | if (!(ed = ep->hcpriv)) { | |
395 | struct td *td; | |
396 | int is_out; | |
397 | u32 info; | |
398 | ||
399 | ed = ed_alloc (ohci, GFP_ATOMIC); | |
400 | if (!ed) { | |
401 | /* out of memory */ | |
402 | goto done; | |
403 | } | |
404 | ||
dd9048af | 405 | /* dummy td; end of td list for ed */ |
1da177e4 | 406 | td = td_alloc (ohci, GFP_ATOMIC); |
dd9048af | 407 | if (!td) { |
1da177e4 LT |
408 | /* out of memory */ |
409 | ed_free (ohci, ed); | |
410 | ed = NULL; | |
411 | goto done; | |
412 | } | |
413 | ed->dummy = td; | |
414 | ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma); | |
415 | ed->hwHeadP = ed->hwTailP; /* ED_C, ED_H zeroed */ | |
416 | ed->state = ED_IDLE; | |
417 | ||
418 | is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN); | |
419 | ||
420 | /* FIXME usbcore changes dev->devnum before SET_ADDRESS | |
421 | * succeeds ... otherwise we wouldn't need "pipe". | |
422 | */ | |
423 | info = usb_pipedevice (pipe); | |
424 | ed->type = usb_pipetype(pipe); | |
425 | ||
426 | info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7; | |
427 | info |= le16_to_cpu(ep->desc.wMaxPacketSize) << 16; | |
428 | if (udev->speed == USB_SPEED_LOW) | |
429 | info |= ED_LOWSPEED; | |
430 | /* only control transfers store pids in tds */ | |
431 | if (ed->type != PIPE_CONTROL) { | |
432 | info |= is_out ? ED_OUT : ED_IN; | |
433 | if (ed->type != PIPE_BULK) { | |
434 | /* periodic transfers... */ | |
435 | if (ed->type == PIPE_ISOCHRONOUS) | |
436 | info |= ED_ISO; | |
437 | else if (interval > 32) /* iso can be bigger */ | |
438 | interval = 32; | |
439 | ed->interval = interval; | |
440 | ed->load = usb_calc_bus_time ( | |
441 | udev->speed, !is_out, | |
442 | ed->type == PIPE_ISOCHRONOUS, | |
443 | le16_to_cpu(ep->desc.wMaxPacketSize)) | |
444 | / 1000; | |
445 | } | |
446 | } | |
447 | ed->hwINFO = cpu_to_hc32(ohci, info); | |
448 | ||
449 | ep->hcpriv = ed; | |
450 | } | |
451 | ||
452 | done: | |
453 | spin_unlock_irqrestore (&ohci->lock, flags); | |
dd9048af | 454 | return ed; |
1da177e4 LT |
455 | } |
456 | ||
457 | /*-------------------------------------------------------------------------*/ | |
458 | ||
459 | /* request unlinking of an endpoint from an operational HC. | |
460 | * put the ep on the rm_list | |
461 | * real work is done at the next start frame (SF) hardware interrupt | |
462 | * caller guarantees HCD is running (so hardware access is safe) | |
463 | * and that ed->state is ED_OPER | |
464 | */ | |
465 | static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed) | |
dd9048af | 466 | { |
1da177e4 LT |
467 | ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE); |
468 | ed_deschedule (ohci, ed); | |
469 | ||
470 | /* rm_list is just singly linked, for simplicity */ | |
471 | ed->ed_next = ohci->ed_rm_list; | |
472 | ed->ed_prev = NULL; | |
473 | ohci->ed_rm_list = ed; | |
474 | ||
475 | /* enable SOF interrupt */ | |
476 | ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus); | |
477 | ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable); | |
478 | // flush those writes, and get latest HCCA contents | |
479 | (void) ohci_readl (ohci, &ohci->regs->control); | |
480 | ||
481 | /* SF interrupt might get delayed; record the frame counter value that | |
482 | * indicates when the HC isn't looking at it, so concurrent unlinks | |
483 | * behave. frame_no wraps every 2^16 msec, and changes right before | |
484 | * SF is triggered. | |
485 | */ | |
486 | ed->tick = ohci_frame_no(ohci) + 1; | |
487 | ||
488 | } | |
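The ed->tick value recorded above is later compared with tick_before(), which (defined elsewhere in this driver) has to tolerate the 16-bit frame counter wrapping every 2^16 ms. A self-contained sketch of that style of wrap-safe comparison — assumed semantics, not the driver's exact macro:

```c
#include <stdint.h>
#include <stdio.h>

/* True if 16-bit frame number a is "before" b, tolerating wrap-around,
 * provided the two values are less than 2^15 frames apart.
 */
static int frame_before(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", frame_before(0xFFFE, 0x0003));	/* 1: before, across the wrap */
	printf("%d\n", frame_before(0x0003, 0xFFFE));	/* 0 */
	return 0;
}
```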
489 | ||
490 | /*-------------------------------------------------------------------------* | |
491 | * TD handling functions | |
492 | *-------------------------------------------------------------------------*/ | |
493 | ||
494 | /* enqueue next TD for this URB (OHCI spec 5.2.8.2) */ | |
495 | ||
496 | static void | |
497 | td_fill (struct ohci_hcd *ohci, u32 info, | |
498 | dma_addr_t data, int len, | |
499 | struct urb *urb, int index) | |
500 | { | |
501 | struct td *td, *td_pt; | |
502 | struct urb_priv *urb_priv = urb->hcpriv; | |
503 | int is_iso = info & TD_ISO; | |
504 | int hash; | |
505 | ||
506 | // ASSERT (index < urb_priv->length); | |
507 | ||
508 | /* aim for only one interrupt per urb. mostly applies to control | |
509 | * and iso; other urbs rarely need more than one TD per urb. | |
510 | * this way, only final tds (or ones with an error) cause IRQs. | |
511 | * at least immediately; use DI=6 in case any control request is | |
512 | * tempted to die part way through. (and to force the hc to flush | |
513 | * its donelist soonish, even on unlink paths.) | |
514 | * | |
515 | * NOTE: could delay interrupts even for the last TD, and get fewer | |
516 | * interrupts ... increasing per-urb latency by sharing interrupts. | |
517 | * Drivers that queue bulk urbs may request that behavior. | |
518 | */ | |
519 | if (index != (urb_priv->length - 1) | |
520 | || (urb->transfer_flags & URB_NO_INTERRUPT)) | |
521 | info |= TD_DI_SET (6); | |
522 | ||
523 | /* use this td as the next dummy */ | |
524 | td_pt = urb_priv->td [index]; | |
525 | ||
526 | /* fill the old dummy TD */ | |
527 | td = urb_priv->td [index] = urb_priv->ed->dummy; | |
528 | urb_priv->ed->dummy = td_pt; | |
529 | ||
530 | td->ed = urb_priv->ed; | |
531 | td->next_dl_td = NULL; | |
532 | td->index = index; | |
dd9048af | 533 | td->urb = urb; |
1da177e4 LT |
534 | td->data_dma = data; |
535 | if (!len) | |
536 | data = 0; | |
537 | ||
538 | td->hwINFO = cpu_to_hc32 (ohci, info); | |
539 | if (is_iso) { | |
540 | td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000); | |
541 | *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci, | |
542 | (data & 0x0FFF) | 0xE000); | |
543 | td->ed->last_iso = info & 0xffff; | |
544 | } else { | |
dd9048af DB |
545 | td->hwCBP = cpu_to_hc32 (ohci, data); |
546 | } | |
1da177e4 LT |
547 | if (data) |
548 | td->hwBE = cpu_to_hc32 (ohci, data + len - 1); | |
549 | else | |
550 | td->hwBE = 0; | |
551 | td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma); | |
552 | ||
553 | /* append to queue */ | |
554 | list_add_tail (&td->td_list, &td->ed->td_list); | |
555 | ||
556 | /* hash it for later reverse mapping */ | |
557 | hash = TD_HASH_FUNC (td->td_dma); | |
558 | td->td_hash = ohci->td_hash [hash]; | |
559 | ohci->td_hash [hash] = td; | |
560 | ||
561 | /* HC might read the TD (or cachelines) right away ... */ | |
562 | wmb (); | |
563 | td->ed->hwTailP = td->hwNextTD; | |
564 | } | |
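The subtle part of td_fill() is the dummy-TD hand-off (OHCI spec 5.2.8.2): the ED's tail always points at an empty placeholder TD, the new transfer is written into that placeholder, and the freshly allocated TD becomes the next placeholder, so the controller never sees a half-built descriptor. A toy model of that pattern, with invented names (`toy_td`, `toy_ed`, `toy_queue_work`):

```c
#include <stdio.h>
#include <stdlib.h>

struct toy_td {
	int payload;		/* stands in for the real TD contents */
	struct toy_td *next;	/* stands in for hwNextTD */
};

struct toy_ed {
	struct toy_td *head;	/* where the "controller" starts reading */
	struct toy_td *dummy;	/* always an empty TD at the tail */
};

/* Write real work into the current dummy, then append a fresh dummy. */
static void toy_queue_work(struct toy_ed *ed, int payload)
{
	struct toy_td *new_dummy = calloc(1, sizeof(*new_dummy));
	struct toy_td *td = ed->dummy;	/* old dummy becomes the real TD */

	td->payload = payload;
	td->next = new_dummy;		/* publish the new tail last */
	ed->dummy = new_dummy;
}

int main(void)
{
	struct toy_ed ed;

	ed.dummy = calloc(1, sizeof(*ed.dummy));
	ed.head = ed.dummy;		/* empty queue: head == dummy */

	toy_queue_work(&ed, 1);
	toy_queue_work(&ed, 2);

	/* everything before the dummy is real work: prints 1 then 2 */
	for (struct toy_td *t = ed.head; t != ed.dummy; t = t->next)
		printf("%d\n", t->payload);
	return 0;
}
```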
565 | ||
566 | /*-------------------------------------------------------------------------*/ | |
567 | ||
568 | /* Prepare all TDs of a transfer, and queue them onto the ED. | |
569 | * Caller guarantees HC is active. | |
570 | * Usually the ED is already on the schedule, so TDs might be | |
571 | * processed as soon as they're queued. | |
572 | */ | |
573 | static void td_submit_urb ( | |
574 | struct ohci_hcd *ohci, | |
575 | struct urb *urb | |
576 | ) { | |
577 | struct urb_priv *urb_priv = urb->hcpriv; | |
578 | dma_addr_t data; | |
579 | int data_len = urb->transfer_buffer_length; | |
580 | int cnt = 0; | |
581 | u32 info = 0; | |
582 | int is_out = usb_pipeout (urb->pipe); | |
583 | int periodic = 0; | |
584 | ||
585 | /* OHCI handles the bulk/interrupt data toggles itself. We just | |
586 | * use the device toggle bits for resetting, and rely on the fact | |
587 | * that resetting toggle is meaningless if the endpoint is active. | |
588 | */ | |
dd9048af | 589 | if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) { |
1da177e4 LT |
590 | usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), |
591 | is_out, 1); | |
592 | urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C); | |
593 | } | |
594 | ||
595 | urb_priv->td_cnt = 0; | |
596 | list_add (&urb_priv->pending, &ohci->pending); | |
597 | ||
598 | if (data_len) | |
599 | data = urb->transfer_dma; | |
600 | else | |
601 | data = 0; | |
602 | ||
603 | /* NOTE: TD_CC is set so we can tell which TDs the HC processed by | |
604 | * using TD_CC_GET, as well as by seeing them on the done list. | |
605 | * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.) | |
606 | */ | |
607 | switch (urb_priv->ed->type) { | |
608 | ||
609 | /* Bulk and interrupt are identical except for where in the schedule | |
610 | * their EDs live. | |
611 | */ | |
612 | case PIPE_INTERRUPT: | |
613 | /* ... and periodic urbs have extra accounting */ | |
614 | periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0 | |
615 | && ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0; | |
616 | /* FALLTHROUGH */ | |
617 | case PIPE_BULK: | |
618 | info = is_out | |
619 | ? TD_T_TOGGLE | TD_CC | TD_DP_OUT | |
620 | : TD_T_TOGGLE | TD_CC | TD_DP_IN; | |
621 | /* TDs _could_ transfer up to 8K each */ | |
622 | while (data_len > 4096) { | |
623 | td_fill (ohci, info, data, 4096, urb, cnt); | |
624 | data += 4096; | |
625 | data_len -= 4096; | |
626 | cnt++; | |
627 | } | |
628 | /* maybe avoid ED halt on final TD short read */ | |
629 | if (!(urb->transfer_flags & URB_SHORT_NOT_OK)) | |
630 | info |= TD_R; | |
631 | td_fill (ohci, info, data, data_len, urb, cnt); | |
632 | cnt++; | |
633 | if ((urb->transfer_flags & URB_ZERO_PACKET) | |
634 | && cnt < urb_priv->length) { | |
635 | td_fill (ohci, info, 0, 0, urb, cnt); | |
636 | cnt++; | |
637 | } | |
638 | /* maybe kickstart bulk list */ | |
639 | if (urb_priv->ed->type == PIPE_BULK) { | |
640 | wmb (); | |
641 | ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus); | |
642 | } | |
643 | break; | |
644 | ||
645 | /* control manages DATA0/DATA1 toggle per-request; SETUP resets it, | |
646 | * any DATA phase works normally, and the STATUS ack is special. | |
647 | */ | |
648 | case PIPE_CONTROL: | |
649 | info = TD_CC | TD_DP_SETUP | TD_T_DATA0; | |
650 | td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++); | |
651 | if (data_len > 0) { | |
652 | info = TD_CC | TD_R | TD_T_DATA1; | |
653 | info |= is_out ? TD_DP_OUT : TD_DP_IN; | |
654 | /* NOTE: mishandles transfers >8K, some >4K */ | |
655 | td_fill (ohci, info, data, data_len, urb, cnt++); | |
656 | } | |
657 | info = (is_out || data_len == 0) | |
658 | ? TD_CC | TD_DP_IN | TD_T_DATA1 | |
659 | : TD_CC | TD_DP_OUT | TD_T_DATA1; | |
660 | td_fill (ohci, info, data, 0, urb, cnt++); | |
661 | /* maybe kickstart control list */ | |
662 | wmb (); | |
663 | ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus); | |
664 | break; | |
665 | ||
666 | /* ISO has no retransmit, so no toggle; and it uses special TDs. | |
667 | * Each TD could handle multiple consecutive frames (interval 1); | |
668 | * we could often reduce the number of TDs here. | |
669 | */ | |
670 | case PIPE_ISOCHRONOUS: | |
671 | for (cnt = 0; cnt < urb->number_of_packets; cnt++) { | |
672 | int frame = urb->start_frame; | |
673 | ||
674 | // FIXME scheduling should handle frame counter | |
675 | // roll-around ... exotic case (and OHCI has | |
676 | // a 2^16 iso range, vs other HCs max of 2^10) | |
677 | frame += cnt * urb->interval; | |
678 | frame &= 0xffff; | |
679 | td_fill (ohci, TD_CC | TD_ISO | frame, | |
680 | data + urb->iso_frame_desc [cnt].offset, | |
681 | urb->iso_frame_desc [cnt].length, urb, cnt); | |
682 | } | |
683 | periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0 | |
684 | && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0; | |
685 | break; | |
686 | } | |
687 | ||
688 | /* start periodic dma if needed */ | |
689 | if (periodic) { | |
690 | wmb (); | |
691 | ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE; | |
692 | ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); | |
693 | } | |
694 | ||
695 | // ASSERT (urb_priv->length == cnt); | |
696 | } | |
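The PIPE_CONTROL case above always queues a SETUP TD (DATA0), an optional data-stage TD (DATA1), and a zero-length STATUS TD in the direction opposite the data stage (IN when there is no data stage). A hedged sketch of just that direction rule, using invented names:

```c
#include <stdio.h>

enum dir { DIR_IN, DIR_OUT };	/* illustrative, not driver symbols */

/* Direction of the zero-length STATUS stage, mirroring the
 * "(is_out || data_len == 0)" test in td_submit_urb() above.
 */
static enum dir status_stage_dir(enum dir data_dir, int data_len)
{
	if (data_dir == DIR_OUT || data_len == 0)
		return DIR_IN;
	return DIR_OUT;
}

int main(void)
{
	/* control-IN with data: STATUS goes OUT */
	printf("%s\n", status_stage_dir(DIR_IN, 64) == DIR_OUT ? "OUT" : "IN");
	/* no data stage: STATUS goes IN */
	printf("%s\n", status_stage_dir(DIR_OUT, 0) == DIR_OUT ? "OUT" : "IN");
	return 0;
}
```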
697 | ||
698 | /*-------------------------------------------------------------------------* | |
699 | * Done List handling functions | |
700 | *-------------------------------------------------------------------------*/ | |
701 | ||
55d84968 AS |
702 | /* calculate transfer length/status and update the urb */ |
703 | static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td) | |
1da177e4 LT |
704 | { |
705 | u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO); | |
706 | int cc = 0; | |
55d84968 | 707 | int status = -EINPROGRESS; |
1da177e4 LT |
708 | |
709 | list_del (&td->td_list); | |
710 | ||
711 | /* ISO ... drivers see per-TD length/status */ | |
dd9048af | 712 | if (tdINFO & TD_ISO) { |
55d84968 | 713 | u16 tdPSW = ohci_hwPSW(ohci, td, 0); |
1da177e4 LT |
714 | int dlen = 0; |
715 | ||
716 | /* NOTE: assumes FC in tdINFO == 0, and that | |
717 | * only the first of 0..MAXPSW psws is used. | |
718 | */ | |
719 | ||
dd9048af DB |
720 | cc = (tdPSW >> 12) & 0xF; |
721 | if (tdINFO & TD_CC) /* hc didn't touch? */ | |
55d84968 | 722 | return status; |
1da177e4 LT |
723 | |
724 | if (usb_pipeout (urb->pipe)) | |
725 | dlen = urb->iso_frame_desc [td->index].length; | |
726 | else { | |
727 | /* short reads are always OK for ISO */ | |
728 | if (cc == TD_DATAUNDERRUN) | |
729 | cc = TD_CC_NOERROR; | |
730 | dlen = tdPSW & 0x3ff; | |
731 | } | |
732 | urb->actual_length += dlen; | |
733 | urb->iso_frame_desc [td->index].actual_length = dlen; | |
734 | urb->iso_frame_desc [td->index].status = cc_to_error [cc]; | |
735 | ||
736 | if (cc != TD_CC_NOERROR) | |
737 | ohci_vdbg (ohci, | |
738 | "urb %p iso td %p (%d) len %d cc %d\n", | |
739 | urb, td, 1 + td->index, dlen, cc); | |
740 | ||
741 | /* BULK, INT, CONTROL ... drivers see aggregate length/status, | |
742 | * except that "setup" bytes aren't counted and "short" transfers | |
743 | * might not be reported as errors. | |
744 | */ | |
745 | } else { | |
746 | int type = usb_pipetype (urb->pipe); | |
747 | u32 tdBE = hc32_to_cpup (ohci, &td->hwBE); | |
748 | ||
dd9048af | 749 | cc = TD_CC_GET (tdINFO); |
1da177e4 LT |
750 | |
751 | /* update packet status if needed (short is normally ok) */ | |
752 | if (cc == TD_DATAUNDERRUN | |
753 | && !(urb->transfer_flags & URB_SHORT_NOT_OK)) | |
754 | cc = TD_CC_NOERROR; | |
55d84968 AS |
755 | if (cc != TD_CC_NOERROR && cc < 0x0E) |
756 | status = cc_to_error[cc]; | |
1da177e4 LT |
757 | |
758 | /* count all non-empty packets except control SETUP packet */ | |
759 | if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) { | |
760 | if (td->hwCBP == 0) | |
761 | urb->actual_length += tdBE - td->data_dma + 1; | |
762 | else | |
763 | urb->actual_length += | |
764 | hc32_to_cpup (ohci, &td->hwCBP) | |
765 | - td->data_dma; | |
766 | } | |
767 | ||
768 | if (cc != TD_CC_NOERROR && cc < 0x0E) | |
769 | ohci_vdbg (ohci, | |
770 | "urb %p td %p (%d) cc %d, len=%d/%d\n", | |
771 | urb, td, 1 + td->index, cc, | |
772 | urb->actual_length, | |
773 | urb->transfer_buffer_length); | |
dd9048af | 774 | } |
55d84968 | 775 | return status; |
1da177e4 LT |
776 | } |
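For the non-ISO branch above, the transferred byte count comes from the buffer pointers: if the controller cleared CurrentBufferPointer it consumed everything up to BufferEnd (inclusive), otherwise it stopped just before CBP. A small illustration with invented names, treating the DMA addresses as plain integers:

```c
#include <stdint.h>
#include <stdio.h>

/* Bytes transferred for a non-ISO TD, mirroring the CBP/BE arithmetic
 * in td_done() above.
 */
static uint32_t td_bytes(uint32_t start, uint32_t be, uint32_t cbp)
{
	if (cbp == 0)			/* buffer fully consumed */
		return be - start + 1;
	return cbp - start;		/* stopped short, just before cbp */
}

int main(void)
{
	printf("%u\n", (unsigned)td_bytes(0x1000, 0x13FF, 0));		/* 1024 */
	printf("%u\n", (unsigned)td_bytes(0x1000, 0x13FF, 0x1200));	/* 512 */
	return 0;
}
```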
777 | ||
778 | /*-------------------------------------------------------------------------*/ | |
779 | ||
6e8fe43b | 780 | static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc) |
1da177e4 | 781 | { |
dd9048af | 782 | struct urb *urb = td->urb; |
6e8fe43b | 783 | urb_priv_t *urb_priv = urb->hcpriv; |
1da177e4 LT |
784 | struct ed *ed = td->ed; |
785 | struct list_head *tmp = td->td_list.next; | |
786 | __hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C); | |
787 | ||
788 | /* clear ed halt; this is the td that caused it, but keep it inactive | |
789 | * until its urb->complete() has a chance to clean up. | |
790 | */ | |
791 | ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP); | |
792 | wmb (); | |
dd9048af | 793 | ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H); |
1da177e4 | 794 | |
6e8fe43b AS |
795 | /* Get rid of all later tds from this urb. We don't have |
796 | * to be careful: no errors and nothing was transferred. | |
797 | * Also patch the ed so it looks as if those tds completed normally. | |
1da177e4 LT |
798 | */ |
799 | while (tmp != &ed->td_list) { | |
800 | struct td *next; | |
1da177e4 LT |
801 | |
802 | next = list_entry (tmp, struct td, td_list); | |
803 | tmp = next->td_list.next; | |
804 | ||
805 | if (next->urb != urb) | |
806 | break; | |
807 | ||
808 | /* NOTE: if multi-td control DATA segments get supported, | |
809 | * this urb had one of them, this td wasn't the last td | |
810 | * in that segment (TD_R clear), this ed halted because | |
811 | * of a short read, _and_ URB_SHORT_NOT_OK is clear ... | |
812 | * then we need to leave the control STATUS packet queued | |
813 | * and clear ED_SKIP. | |
814 | */ | |
1da177e4 | 815 | |
6e8fe43b AS |
816 | list_del(&next->td_list); |
817 | urb_priv->td_cnt++; | |
1da177e4 LT |
818 | ed->hwHeadP = next->hwNextTD | toggle; |
819 | } | |
820 | ||
821 | /* help for troubleshooting: report anything that | |
822 | * looks odd ... that doesn't include protocol stalls | |
823 | * (or maybe some other things) | |
824 | */ | |
825 | switch (cc) { | |
826 | case TD_DATAUNDERRUN: | |
827 | if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0) | |
828 | break; | |
829 | /* fallthrough */ | |
830 | case TD_CC_STALL: | |
831 | if (usb_pipecontrol (urb->pipe)) | |
832 | break; | |
833 | /* fallthrough */ | |
834 | default: | |
835 | ohci_dbg (ohci, | |
836 | "urb %p path %s ep%d%s %08x cc %d --> status %d\n", | |
837 | urb, urb->dev->devpath, | |
838 | usb_pipeendpoint (urb->pipe), | |
839 | usb_pipein (urb->pipe) ? "in" : "out", | |
840 | hc32_to_cpu (ohci, td->hwINFO), | |
841 | cc, cc_to_error [cc]); | |
842 | } | |
1da177e4 LT |
843 | } |
844 | ||
845 | /* replies to the request have to be on a FIFO basis so | |
846 | * we unreverse the hc-reversed done-list | |
847 | */ | |
848 | static struct td *dl_reverse_done_list (struct ohci_hcd *ohci) | |
849 | { | |
850 | u32 td_dma; | |
851 | struct td *td_rev = NULL; | |
852 | struct td *td = NULL; | |
853 | ||
854 | td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head); | |
855 | ohci->hcca->done_head = 0; | |
856 | wmb(); | |
857 | ||
858 | /* get TD from hc's singly linked list, and | |
859 | * prepend to ours. ed->td_list changes later. | |
860 | */ | |
dd9048af DB |
861 | while (td_dma) { |
862 | int cc; | |
1da177e4 LT |
863 | |
864 | td = dma_to_td (ohci, td_dma); | |
865 | if (!td) { | |
866 | ohci_err (ohci, "bad entry %8x\n", td_dma); | |
867 | break; | |
868 | } | |
869 | ||
870 | td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE); | |
871 | cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO)); | |
872 | ||
873 | /* Non-iso endpoints can halt on error; un-halt, | |
874 | * and dequeue any other TDs from this urb. | |
875 | * No other TD could have caused the halt. | |
876 | */ | |
877 | if (cc != TD_CC_NOERROR | |
878 | && (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H))) | |
6e8fe43b | 879 | ed_halted(ohci, td, cc); |
1da177e4 | 880 | |
dd9048af | 881 | td->next_dl_td = td_rev; |
1da177e4 LT |
882 | td_rev = td; |
883 | td_dma = hc32_to_cpup (ohci, &td->hwNextTD); | |
dd9048af | 884 | } |
1da177e4 LT |
885 | return td_rev; |
886 | } | |
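The controller builds its done list by prepending each completed TD, so entries arrive newest-first; the loop above rebuilds oldest-first order by prepending again onto a new chain. The same idea on a generic singly linked list (names are illustrative):

```c
#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

/* Re-reverse a LIFO chain into FIFO order by prepending as we walk it. */
static struct node *unreverse(struct node *lifo)
{
	struct node *fifo = NULL;

	while (lifo) {
		struct node *next = lifo->next;

		lifo->next = fifo;	/* most recently completed ends up last */
		fifo = lifo;
		lifo = next;
	}
	return fifo;
}

int main(void)
{
	/* done list as the HC left it: 3 completed last, so it comes first */
	struct node n1 = { 1, NULL }, n2 = { 2, &n1 }, n3 = { 3, &n2 };

	for (struct node *n = unreverse(&n3); n; n = n->next)
		printf("%d\n", n->id);	/* prints 1, 2, 3 */
	return 0;
}
```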
887 | ||
888 | /*-------------------------------------------------------------------------*/ | |
889 | ||
890 | /* there are some urbs/eds to unlink; called in_irq(), with HCD locked */ | |
891 | static void | |
7d12e780 | 892 | finish_unlinks (struct ohci_hcd *ohci, u16 tick) |
1da177e4 LT |
893 | { |
894 | struct ed *ed, **last; | |
895 | ||
896 | rescan_all: | |
897 | for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) { | |
898 | struct list_head *entry, *tmp; | |
899 | int completed, modified; | |
900 | __hc32 *prev; | |
901 | ||
902 | /* only take off EDs that the HC isn't using, accounting for | |
903 | * frame counter wraps and EDs with partially retired TDs | |
904 | */ | |
da66b719 | 905 | if (likely (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) { |
1da177e4 LT |
906 | if (tick_before (tick, ed->tick)) { |
907 | skip_ed: | |
908 | last = &ed->ed_next; | |
909 | continue; | |
910 | } | |
911 | ||
912 | if (!list_empty (&ed->td_list)) { | |
913 | struct td *td; | |
914 | u32 head; | |
915 | ||
916 | td = list_entry (ed->td_list.next, struct td, | |
917 | td_list); | |
918 | head = hc32_to_cpu (ohci, ed->hwHeadP) & | |
919 | TD_MASK; | |
920 | ||
921 | /* INTR_WDH may need to clean up first */ | |
89a0fd18 MN |
922 | if (td->td_dma != head) { |
923 | if (ed == ohci->ed_to_check) | |
924 | ohci->ed_to_check = NULL; | |
925 | else | |
926 | goto skip_ed; | |
927 | } | |
1da177e4 LT |
928 | } |
929 | } | |
930 | ||
931 | /* reentrancy: if we drop the schedule lock, someone might | |
932 | * have modified this list. normally it's just prepending | |
933 | * entries (which we'd ignore), but paranoia won't hurt. | |
934 | */ | |
935 | *last = ed->ed_next; | |
936 | ed->ed_next = NULL; | |
937 | modified = 0; | |
938 | ||
939 | /* unlink urbs as requested, but rescan the list after | |
940 | * we call a completion since it might have unlinked | |
941 | * another (earlier) urb | |
942 | * | |
943 | * When we get here, the HC doesn't see this ed. But it | |
944 | * must not be rescheduled until all completed URBs have | |
945 | * been given back to the driver. | |
946 | */ | |
947 | rescan_this: | |
948 | completed = 0; | |
949 | prev = &ed->hwHeadP; | |
950 | list_for_each_safe (entry, tmp, &ed->td_list) { | |
951 | struct td *td; | |
952 | struct urb *urb; | |
953 | urb_priv_t *urb_priv; | |
954 | __hc32 savebits; | |
955 | ||
956 | td = list_entry (entry, struct td, td_list); | |
957 | urb = td->urb; | |
958 | urb_priv = td->urb->hcpriv; | |
959 | ||
eb231054 | 960 | if (!urb->unlinked) { |
1da177e4 LT |
961 | prev = &td->hwNextTD; |
962 | continue; | |
963 | } | |
964 | ||
965 | /* patch pointer hc uses */ | |
966 | savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK); | |
967 | *prev = td->hwNextTD | savebits; | |
968 | ||
969 | /* HC may have partly processed this TD */ | |
970 | td_done (ohci, urb, td); | |
971 | urb_priv->td_cnt++; | |
972 | ||
973 | /* if URB is done, clean up */ | |
974 | if (urb_priv->td_cnt == urb_priv->length) { | |
975 | modified = completed = 1; | |
55d84968 | 976 | finish_urb(ohci, urb, 0); |
1da177e4 LT |
977 | } |
978 | } | |
979 | if (completed && !list_empty (&ed->td_list)) | |
980 | goto rescan_this; | |
981 | ||
982 | /* ED's now officially unlinked, hc doesn't see */ | |
983 | ed->state = ED_IDLE; | |
89a0fd18 MN |
984 | if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT) |
985 | ohci->eds_scheduled--; | |
1da177e4 LT |
986 | ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); |
987 | ed->hwNextED = 0; | |
988 | wmb (); | |
989 | ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE); | |
990 | ||
991 | /* but if there's work queued, reschedule */ | |
992 | if (!list_empty (&ed->td_list)) { | |
993 | if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)) | |
994 | ed_schedule (ohci, ed); | |
995 | } | |
996 | ||
997 | if (modified) | |
998 | goto rescan_all; | |
dd9048af | 999 | } |
1da177e4 | 1000 | |
dd9048af | 1001 | /* maybe reenable control and bulk lists */ |
1da177e4 LT |
1002 | if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state) |
1003 | && ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING | |
1004 | && !ohci->ed_rm_list) { | |
1005 | u32 command = 0, control = 0; | |
1006 | ||
1007 | if (ohci->ed_controltail) { | |
1008 | command |= OHCI_CLF; | |
89a0fd18 | 1009 | if (quirk_zfmicro(ohci)) |
0e498763 | 1010 | mdelay(1); |
1da177e4 LT |
1011 | if (!(ohci->hc_control & OHCI_CTRL_CLE)) { |
1012 | control |= OHCI_CTRL_CLE; | |
1013 | ohci_writel (ohci, 0, | |
1014 | &ohci->regs->ed_controlcurrent); | |
1015 | } | |
1016 | } | |
1017 | if (ohci->ed_bulktail) { | |
1018 | command |= OHCI_BLF; | |
89a0fd18 | 1019 | if (quirk_zfmicro(ohci)) |
0e498763 | 1020 | mdelay(1); |
1da177e4 LT |
1021 | if (!(ohci->hc_control & OHCI_CTRL_BLE)) { |
1022 | control |= OHCI_CTRL_BLE; | |
1023 | ohci_writel (ohci, 0, | |
1024 | &ohci->regs->ed_bulkcurrent); | |
1025 | } | |
1026 | } | |
dd9048af | 1027 | |
1da177e4 LT |
1028 | /* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */ |
1029 | if (control) { | |
1030 | ohci->hc_control |= control; | |
89a0fd18 | 1031 | if (quirk_zfmicro(ohci)) |
0e498763 | 1032 | mdelay(1); |
dd9048af DB |
1033 | ohci_writel (ohci, ohci->hc_control, |
1034 | &ohci->regs->control); | |
1035 | } | |
0e498763 | 1036 | if (command) { |
89a0fd18 | 1037 | if (quirk_zfmicro(ohci)) |
0e498763 | 1038 | mdelay(1); |
dd9048af DB |
1039 | ohci_writel (ohci, command, &ohci->regs->cmdstatus); |
1040 | } | |
0e498763 | 1041 | } |
1da177e4 LT |
1042 | } |
1043 | ||
1044 | ||
1045 | ||
1046 | /*-------------------------------------------------------------------------*/ | |
1047 | ||
89a0fd18 MN |
1048 | /* |
1049 | * Used to take back a TD from the host controller. This would normally be | |
1050 | * called from within dl_done_list; however, it may be called directly if the | |
1051 | * HC no longer sees the TD and it has not appeared on the donelist (after | |
1052 | * two frames). This bug has been observed on ZF Micro systems. | |
1053 | */ | |
1054 | static void takeback_td(struct ohci_hcd *ohci, struct td *td) | |
1055 | { | |
1056 | struct urb *urb = td->urb; | |
1057 | urb_priv_t *urb_priv = urb->hcpriv; | |
1058 | struct ed *ed = td->ed; | |
55d84968 | 1059 | int status; |
89a0fd18 MN |
1060 | |
1061 | /* update URB's length and status from TD */ | |
55d84968 | 1062 | status = td_done(ohci, urb, td); |
89a0fd18 MN |
1063 | urb_priv->td_cnt++; |
1064 | ||
1065 | /* If all this urb's TDs are done, call complete() */ | |
1066 | if (urb_priv->td_cnt == urb_priv->length) | |
55d84968 | 1067 | finish_urb(ohci, urb, status); |
89a0fd18 MN |
1068 | |
1069 | /* clean schedule: unlink EDs that are no longer busy */ | |
1070 | if (list_empty(&ed->td_list)) { | |
1071 | if (ed->state == ED_OPER) | |
1072 | start_ed_unlink(ohci, ed); | |
1073 | ||
1074 | /* ... reenabling halted EDs only after fault cleanup */ | |
1075 | } else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE)) | |
1076 | == cpu_to_hc32(ohci, ED_SKIP)) { | |
1077 | td = list_entry(ed->td_list.next, struct td, td_list); | |
1078 | if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) { | |
1079 | ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP); | |
1080 | /* ... hc may need waking-up */ | |
1081 | switch (ed->type) { | |
1082 | case PIPE_CONTROL: | |
1083 | ohci_writel(ohci, OHCI_CLF, | |
1084 | &ohci->regs->cmdstatus); | |
1085 | break; | |
1086 | case PIPE_BULK: | |
1087 | ohci_writel(ohci, OHCI_BLF, | |
1088 | &ohci->regs->cmdstatus); | |
1089 | break; | |
1090 | } | |
1091 | } | |
1092 | } | |
1093 | } | |
1094 | ||
1da177e4 LT |
1095 | /* |
1096 | * Process normal completions (error or success) and clean the schedules. | |
1097 | * | |
1098 | * This is the main path for handing urbs back to drivers. The only other | |
89a0fd18 MN |
1099 | * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list, |
1100 | * instead of scanning the (re-reversed) donelist as this does. There's | |
1101 | * an abnormal path too, handling a quirk in some Compaq silicon: URBs | |
1102 | * with TDs that appear to be orphaned are directly reclaimed. | |
1da177e4 LT |
1103 | */ |
1104 | static void | |
7d12e780 | 1105 | dl_done_list (struct ohci_hcd *ohci) |
1da177e4 LT |
1106 | { |
1107 | struct td *td = dl_reverse_done_list (ohci); | |
1108 | ||
dd9048af | 1109 | while (td) { |
1da177e4 | 1110 | struct td *td_next = td->next_dl_td; |
89a0fd18 | 1111 | takeback_td(ohci, td); |
dd9048af DB |
1112 | td = td_next; |
1113 | } | |
1da177e4 | 1114 | } |