/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licensed under the GPL.
 */

#include <linux/irq.h>
#include <linux/slab.h>

static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
{
	int last = urb_priv->length - 1;

	if (last >= 0) {
		int i;
		struct td *td;

		for (i = 0; i <= last; i++) {
			td = urb_priv->td [i];
			if (td)
				td_free (hc, td);
		}
	}

	list_del (&urb_priv->pending);
	kfree (urb_priv);
}

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION: ohci lock held, irqs blocked.
 */
static void
finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
__releases(ohci->lock)
__acquires(ohci->lock)
{
	struct device *dev = ohci_to_hcd(ohci)->self.controller;
	struct usb_host_endpoint *ep = urb->ep;
	struct urb_priv *urb_priv;

	// ASSERT (urb->hcpriv != 0);

restart:
	urb_free_priv (ohci, urb->hcpriv);
	urb->hcpriv = NULL;
	if (likely(status == -EINPROGRESS))
		status = 0;

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
			if (quirk_amdiso(ohci))
				usb_amd_quirk_pll_enable();
			if (quirk_amdprefetch(ohci))
				sb800_prefetch(dev, 0);
		}
		break;
	case PIPE_INTERRUPT:
		ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
		break;
	}

#ifdef OHCI_VERBOSE_DEBUG
	urb_print(urb, "RET", usb_pipeout (urb->pipe), status);
#endif

	/* urb->complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
	spin_unlock (&ohci->lock);
	usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
	spin_lock (&ohci->lock);

	/* stop periodic dma if it's not needed */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}

	/*
	 * An isochronous URB that is submitted too late won't have any TDs
	 * (marked by the fact that the td_cnt value is larger than the
	 * actual number of TDs).  If the next URB on this endpoint is like
	 * that, give it back now.
	 */
	if (!list_empty(&ep->urb_list)) {
		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
		urb_priv = urb->hcpriv;
		if (urb_priv->td_cnt > urb_priv->length) {
			status = 0;
			goto restart;
		}
	}
}
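
/* Editorial sketch (standalone, not part of this driver): finish_urb()
 * drops the spinlock around the giveback because urb->complete() may
 * re-enter the HCD and take the same lock.  A minimal analogue of that
 * pattern using pthreads; all names here are illustrative.
 */
#include <pthread.h>

struct demo_dev {
	pthread_mutex_t lock;
	/* ... state guarded by 'lock' ... */
};

/* Caller holds d->lock.  Invoke 'complete' unlocked so the callback may
 * safely call back into code that takes d->lock itself.
 */
static void demo_give_back(struct demo_dev *d,
		void (*complete)(void *), void *arg)
{
	pthread_mutex_unlock(&d->lock);
	complete(arg);			/* may re-enter and relock */
	pthread_mutex_lock(&d->lock);
	/* NOTE: revalidate any state cached before the unlock */
}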


/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
	int i, branch = -ENOSPC;

	/* iso periods can be huge; iso tds specify frame numbers */
	if (interval > NUM_INTS)
		interval = NUM_INTS;

	/* search for the least loaded schedule branch of that period
	 * that has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval ; i++) {
		if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
			int j;

			/* usb 1.1 says 90% of one frame */
			for (j = i; j < NUM_INTS; j += interval) {
				if ((ohci->load [j] + load) > 900)
					break;
			}
			if (j < NUM_INTS)
				continue;
			branch = i;
		}
	}
	return branch;
}
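
/* Editorial sketch (standalone, not driver code): the search balance()
 * performs, reduced to plain arithmetic.  Among the 'interval' possible
 * starting slots, pick the least-loaded one whose entire column of
 * frames stays within the USB 1.1 periodic budget (900 of 1000
 * microseconds per frame).  Table size and names are illustrative.
 */
#define DEMO_SLOTS 32			/* mirrors NUM_INTS */

static int demo_pick_branch(const int load[DEMO_SLOTS], int interval,
		int usecs)
{
	int i, j, branch = -1;

	for (i = 0; i < interval; i++) {
		if (branch >= 0 && load[branch] <= load[i])
			continue;	/* not an improvement */
		for (j = i; j < DEMO_SLOTS; j += interval)
			if (load[j] + usecs > 900)	/* over budget */
				break;
		if (j >= DEMO_SLOTS)
			branch = i;	/* whole column fits */
	}
	return branch;			/* -1 when nothing fits */
}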

/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
	unsigned i;

	ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed **prev = &ohci->periodic [i];
		__hc32 *prev_p = &ohci->hcca->int_table [i];
		struct ed *here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			wmb ();
			*prev = ed;
			*prev_p = cpu_to_hc32(ohci, ed->dma);
			wmb();
		}
		ohci->load [i] += ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
}
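
/* Editorial sketch (standalone): the bookkeeping behind periodic_link().
 * An ED with period 'interval' occupies every interval-th slot of the
 * 32-entry table, so averaged over all frames it consumes load/interval
 * microseconds per frame -- exactly what is added to bandwidth_allocated
 * above.  Names are illustrative.
 */
#define DEMO_INTS 32

static int demo_occupy(int load[DEMO_INTS], int branch, int interval,
		int usecs)
{
	int i;

	for (i = branch; i < DEMO_INTS; i += interval)
		load[i] += usecs;	/* this ED costs 'usecs' here */
	return usecs / interval;	/* average per-frame bandwidth */
}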

/* link an ed into one of the HC chains */

static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
	int branch;

	ed->state = ED_OPER;
	ed->ed_prev = NULL;
	ed->ed_next = NULL;
	ed->hwNextED = 0;
	if (quirk_zfmicro(ohci)
			&& (ed->type == PIPE_INTERRUPT)
			&& !(ohci->eds_scheduled++))
		mod_timer(&ohci->unlink_watchdog, round_jiffies(jiffies + HZ));
	wmb ();

	/* we care about rm_list when setting CLE/BLE in case the HC was at
	 * work on some TD when CLE/BLE was turned off, and isn't quiesced
	 * yet.  finish_unlinks() restarts as needed, at some upcoming INTR_SF.
	 *
	 * control and bulk EDs are doubly linked (ed_next, ed_prev), but
	 * periodic ones are singly linked (ed_next). that's because the
	 * periodic schedule encodes a tree like figure 3-5 in the ohci
	 * spec:  each qh can have several "previous" nodes, and the tree
	 * doesn't have unused/idle descriptors.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		if (ohci->ed_controltail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
			ohci_writel (ohci, ed->dma,
					&ohci->regs->ed_controlhead);
		} else {
			ohci->ed_controltail->ed_next = ed;
			ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_controltail;
		if (!ohci->ed_controltail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_CLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_controltail = ed;
		break;

	case PIPE_BULK:
		if (ohci->ed_bulktail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
			ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
		} else {
			ohci->ed_bulktail->ed_next = ed;
			ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_bulktail;
		if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_BLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_bulktail = ed;
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		branch = balance (ohci, ed->interval, ed->load);
		if (branch < 0) {
			ohci_dbg (ohci,
				"ERR %d, interval %d msecs, load %d\n",
				branch, ed->interval, ed->load);
			// FIXME if there are TDs queued, fail them!
			return branch;
		}
		ed->branch = branch;
		periodic_link (ohci, ed);
	}

	/* the HC may not see the schedule updates yet, but if it does
	 * then they'll be properly ordered.
	 */
	return 0;
}
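
/* Editorial sketch (standalone): the ordering discipline ed_schedule()
 * relies on -- fully initialize a descriptor, then barrier, then publish
 * the pointer that the consumer (another thread standing in for the HC)
 * follows.  C11 fences approximate the driver's wmb(); illustrative
 * names only.
 */
#include <stdatomic.h>

struct demo_desc {
	int data;
	struct demo_desc *next;
};

static struct demo_desc *_Atomic demo_head;

static void demo_publish(struct demo_desc *d, int value)
{
	d->data = value;
	d->next = 0;
	/* order the initialization before the pointer becomes visible */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&demo_head, d, memory_order_relaxed);
}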

/*-------------------------------------------------------------------------*/

/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	int i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed *temp;
		struct ed **prev = &ohci->periodic [i];
		__hc32 *prev_p = &ohci->hcca->int_table [i];

		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ohci->load [i] -= ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;

	ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}
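
/* Editorial sketch (standalone): the pointer-to-pointer walk used by
 * periodic_unlink() above.  Keeping 'prev' as the address of the link
 * being followed makes removing the first node and removing a middle
 * node the same assignment; names are illustrative.
 */
struct demo_node {
	struct demo_node *next;
};

static void demo_unlink(struct demo_node **head, struct demo_node *victim)
{
	struct demo_node **prev = head;

	while (*prev && *prev != victim)
		prev = &(*prev)->next;
	if (*prev)
		*prev = victim->next;	/* head and middle handled alike */
}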

/* unlink an ed from one of the HC chains.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't.  ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new.  The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately.  HC should be working on them.
 *
 *  - ED_IDLE: when there's no TD queue. there's no reason for the HC
 *    to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->state = ED_UNLINK;

	/* To deschedule something from the control or bulk list, just
	 * clear CLE/BLE and wait.  There's no safe way to scrub out list
	 * head/current registers until later, and "later" isn't very
	 * tightly specified.  Figure 6-5 and Section 6.4.2.2 show how
	 * the HC is reading the ED queues (while we modify them).
	 *
	 * For now, ed_schedule() is "later".  It might be good paranoia
	 * to scrub those registers in finish_unlinks(), in case of bugs
	 * that make the HC try to use them.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_CLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// an ohci_readl() later syncs CLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_controlhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_controltail == ed) {
			ohci->ed_controltail = ed->ed_prev;
			if (ohci->ed_controltail)
				ohci->ed_controltail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	case PIPE_BULK:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_BLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// an ohci_readl() later syncs BLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_bulkhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_bulktail == ed) {
			ohci->ed_bulktail = ed->ed_prev;
			if (ohci->ed_bulktail)
				ohci->ed_bulktail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		periodic_unlink (ohci, ed);
		break;
	}
}
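
/* Editorial sketch (standalone): the ED lifecycle described in the
 * comments above, written out as a transition table.  The driver keeps
 * this implicitly in ed->state; the DEMO_* names are illustrative
 * re-spellings to avoid clashing with the real ED_* constants.
 */
enum demo_ed_state { DEMO_IDLE, DEMO_OPER, DEMO_UNLINK };

static int demo_transition_ok(enum demo_ed_state from, enum demo_ed_state to)
{
	switch (from) {
	case DEMO_IDLE:			/* ed_schedule() */
		return to == DEMO_OPER;
	case DEMO_OPER:			/* ed_deschedule() */
		return to == DEMO_UNLINK;
	case DEMO_UNLINK:		/* finish_unlinks() */
		return to == DEMO_OPER || to == DEMO_IDLE;
	}
	return 0;
}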


/*-------------------------------------------------------------------------*/

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get (
	struct ohci_hcd *ohci,
	struct usb_host_endpoint *ep,
	struct usb_device *udev,
	unsigned int pipe,
	int interval
) {
	struct ed *ed;
	unsigned long flags;

	spin_lock_irqsave (&ohci->lock, flags);

	if (!(ed = ep->hcpriv)) {
		struct td *td;
		int is_out;
		u32 info;

		ed = ed_alloc (ohci, GFP_ATOMIC);
		if (!ed) {
			/* out of memory */
			goto done;
		}

		/* dummy td; end of td list for ed */
		td = td_alloc (ohci, GFP_ATOMIC);
		if (!td) {
			/* out of memory */
			ed_free (ohci, ed);
			ed = NULL;
			goto done;
		}
		ed->dummy = td;
		ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
		ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */
		ed->state = ED_IDLE;

		is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);

		/* FIXME usbcore changes dev->devnum before SET_ADDRESS
		 * succeeds ... otherwise we wouldn't need "pipe".
		 */
		info = usb_pipedevice (pipe);
		ed->type = usb_pipetype(pipe);

		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
		info |= usb_endpoint_maxp(&ep->desc) << 16;
		if (udev->speed == USB_SPEED_LOW)
			info |= ED_LOWSPEED;
		/* only control transfers store pids in tds */
		if (ed->type != PIPE_CONTROL) {
			info |= is_out ? ED_OUT : ED_IN;
			if (ed->type != PIPE_BULK) {
				/* periodic transfers... */
				if (ed->type == PIPE_ISOCHRONOUS)
					info |= ED_ISO;
				else if (interval > 32)	/* iso can be bigger */
					interval = 32;
				ed->interval = interval;
				ed->load = usb_calc_bus_time (
					udev->speed, !is_out,
					ed->type == PIPE_ISOCHRONOUS,
					usb_endpoint_maxp(&ep->desc))
						/ 1000;
			}
		}
		ed->hwINFO = cpu_to_hc32(ohci, info);

		ep->hcpriv = ed;
	}

done:
	spin_unlock_irqrestore (&ohci->lock, flags);
	return ed;
}
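
/* Editorial sketch (standalone): the hwINFO packing performed above,
 * following the OHCI ED dword0 layout -- function address in bits 0..6,
 * endpoint number in bits 7..10, direction in bits 11..12, low-speed
 * flag in bit 13, max packet size in bits 16..26.  The DEMO_* constants
 * re-state the layout for illustration; they are not the driver's.
 */
#define DEMO_ED_OUT		(0x01u << 11)
#define DEMO_ED_IN		(0x02u << 11)
#define DEMO_ED_LOWSPEED	(1u << 13)

static unsigned demo_ed_info(unsigned devnum, unsigned epnum,
		int is_in, int lowspeed, unsigned maxpacket)
{
	unsigned info = devnum & 0x7f;

	info |= (epnum & 0xf) << 7;
	info |= is_in ? DEMO_ED_IN : DEMO_ED_OUT;
	if (lowspeed)
		info |= DEMO_ED_LOWSPEED;
	info |= (maxpacket & 0x7ff) << 16;
	return info;
}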

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
	ed_deschedule (ohci, ed);

	/* rm_list is just singly linked, for simplicity */
	ed->ed_next = ohci->ed_rm_list;
	ed->ed_prev = NULL;
	ohci->ed_rm_list = ed;

	/* enable SOF interrupt */
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
	// flush those writes, and get latest HCCA contents
	(void) ohci_readl (ohci, &ohci->regs->control);

	/* SF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave.  frame_no wraps every 2^16 msec, and changes right before
	 * SF is triggered.
	 */
	ed->tick = ohci_frame_no(ohci) + 1;
}
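/* Editorial sketch (standalone): the wrap-safe comparison behind
 * ed->tick.  The frame counter is 16 bits wide and wraps every 65536 ms,
 * so "is a before b" must be evaluated modulo 2^16, as the driver's
 * tick_before() does.  Helper name is illustrative.
 */
static int demo_frame_before(unsigned short a, unsigned short b)
{
	return (short)(a - b) < 0;	/* correct across the wrap */
}
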
/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

static void
td_fill (struct ohci_hcd *ohci, u32 info,
	dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td *td, *td_pt;
	struct urb_priv *urb_priv = urb->hcpriv;
	int is_iso = info & TD_ISO;
	int hash;

	// ASSERT (index < urb_priv->length);

	/* aim for only one interrupt per urb.  mostly applies to control
	 * and iso; other urbs rarely need more than one TD per urb.
	 * this way, only final tds (or ones with an error) cause IRQs.
	 * at least immediately; use DI=6 in case any control request is
	 * tempted to die part way through.  (and to force the hc to flush
	 * its donelist soonish, even on unlink paths.)
	 *
	 * NOTE: could delay interrupts even for the last TD, and get fewer
	 * interrupts ... increasing per-urb latency by sharing interrupts.
	 * Drivers that queue bulk urbs may request that behavior.
	 */
	if (index != (urb_priv->length - 1)
			|| (urb->transfer_flags & URB_NO_INTERRUPT))
		info |= TD_DI_SET (6);

	/* use this td as the next dummy */
	td_pt = urb_priv->td [index];

	/* fill the old dummy TD */
	td = urb_priv->td [index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;

	td->hwINFO = cpu_to_hc32 (ohci, info);
	if (is_iso) {
		td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
		*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
						(data & 0x0FFF) | 0xE000);
	} else {
		td->hwCBP = cpu_to_hc32 (ohci, data);
	}
	if (data)
		td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
	else
		td->hwBE = 0;
	td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);

	/* append to queue */
	list_add_tail (&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC (td->td_dma);
	td->td_hash = ohci->td_hash [hash];
	ohci->td_hash [hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb ();
	td->ed->hwTailP = td->hwNextTD;
}
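
/* Editorial sketch (standalone): the dummy-TD trick td_fill() uses.
 * The queue always ends with one blank descriptor the controller will
 * not process; a new transfer is written into that blank in place and a
 * fresh blank is linked behind it, so the hardware never observes a
 * half-built descriptor.  Minimal single-producer analogue:
 */
#include <stddef.h>

struct demo_td {
	int data;
	struct demo_td *next;		/* the tail dummy has next == NULL */
};

/* 'dummy' is the current tail placeholder; returns the new one */
static struct demo_td *demo_enqueue(struct demo_td *dummy,
		struct demo_td *fresh, int data)
{
	fresh->next = NULL;		/* fresh becomes the new dummy */
	dummy->data = data;		/* fill the old dummy in place */
	dummy->next = fresh;		/* publish; consumer may run now */
	return fresh;
}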

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
	struct ohci_hcd *ohci,
	struct urb *urb
) {
	struct urb_priv *urb_priv = urb->hcpriv;
	struct device *dev = ohci_to_hcd(ohci)->self.controller;
	dma_addr_t data;
	int data_len = urb->transfer_buffer_length;
	int cnt = 0;
	u32 info = 0;
	int is_out = usb_pipeout (urb->pipe);
	int periodic = 0;

	/* OHCI handles the bulk/interrupt data toggles itself.  We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */
	if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
		usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
			is_out, 1);
		urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
	}

	list_add (&urb_priv->pending, &ohci->pending);

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {

	/* Bulk and interrupt are identical except for where in the schedule
	 * their EDs live.
	 */
	case PIPE_INTERRUPT:
		/* ... and periodic urbs have extra accounting */
		periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
		/* FALLTHROUGH */
	case PIPE_BULK:
		info = is_out
			? TD_T_TOGGLE | TD_CC | TD_DP_OUT
			: TD_T_TOGGLE | TD_CC | TD_DP_IN;
		/* TDs _could_ transfer up to 8K each */
		while (data_len > 4096) {
			td_fill (ohci, info, data, 4096, urb, cnt);
			data += 4096;
			data_len -= 4096;
			cnt++;
		}
		/* maybe avoid ED halt on final TD short read */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			info |= TD_R;
		td_fill (ohci, info, data, data_len, urb, cnt);
		cnt++;
		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& cnt < urb_priv->length) {
			td_fill (ohci, info, 0, 0, urb, cnt);
			cnt++;
		}
		/* maybe kickstart bulk list */
		if (urb_priv->ed->type == PIPE_BULK) {
			wmb ();
			ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
		td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
		if (data_len > 0) {
			info = TD_CC | TD_R | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE:  mishandles transfers >8K, some >4K */
			td_fill (ohci, info, data, data_len, urb, cnt++);
		}
		info = (is_out || data_len == 0)
			? TD_CC | TD_DP_IN | TD_T_DATA1
			: TD_CC | TD_DP_OUT | TD_T_DATA1;
		td_fill (ohci, info, data, 0, urb, cnt++);
		/* maybe kickstart control list */
		wmb ();
		ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
		break;

	/* ISO has no retransmit, so no toggle; and it uses special TDs.
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		for (cnt = urb_priv->td_cnt; cnt < urb->number_of_packets;
				cnt++) {
			int frame = urb->start_frame;

			// FIXME scheduling should handle frame counter
			// roll-around ... exotic case (and OHCI has
			// a 2^16 iso range, vs other HCs max of 2^10)
			frame += cnt * urb->interval;
			frame &= 0xffff;
			td_fill (ohci, TD_CC | TD_ISO | frame,
				data + urb->iso_frame_desc [cnt].offset,
				urb->iso_frame_desc [cnt].length, urb, cnt);
		}
		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
			if (quirk_amdiso(ohci))
				usb_amd_quirk_pll_disable();
			if (quirk_amdprefetch(ohci))
				sb800_prefetch(dev, 1);
		}
		periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
		break;
	}

	/* start periodic dma if needed */
	if (periodic) {
		wmb ();
		ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}

	// ASSERT (urb_priv->length == cnt);
}
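
/* Editorial sketch (standalone): TD accounting for the bulk/interrupt
 * case above -- full 4096-byte pieces, one final (possibly short or
 * empty) piece, and one extra zero-length TD when URB_ZERO_PACKET
 * applies and the length divides evenly by the endpoint's max packet
 * size.  Parameter names are illustrative.
 */
static int demo_td_count(int len, int maxpacket, int want_zlp)
{
	int n = 1;			/* final piece, even if len == 0 */

	if (len > 0)
		n += (len - 1) / 4096;	/* full 4 KB pieces before it */
	if (want_zlp && len > 0 && len % maxpacket == 0)
		n++;			/* explicit zero-length packet */
	return n;
}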

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb */
static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
	u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
	int cc = 0;
	int status = -EINPROGRESS;

	list_del (&td->td_list);

	/* ISO ... drivers see per-TD length/status */
	if (tdINFO & TD_ISO) {
		u16 tdPSW = ohci_hwPSW(ohci, td, 0);
		int dlen = 0;

		/* NOTE:  assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */

		cc = (tdPSW >> 12) & 0xF;
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return status;

		if (usb_pipeout (urb->pipe))
			dlen = urb->iso_frame_desc [td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}
		urb->actual_length += dlen;
		urb->iso_frame_desc [td->index].actual_length = dlen;
		urb->iso_frame_desc [td->index].status = cc_to_error [cc];

		if (cc != TD_CC_NOERROR)
			ohci_vdbg (ohci,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		int type = usb_pipetype (urb->pipe);
		u32 tdBE = hc32_to_cpup (ohci, &td->hwBE);

		cc = TD_CC_GET (tdINFO);

		/* update packet status if needed (short is normally ok) */
		if (cc == TD_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;
		if (cc != TD_CC_NOERROR && cc < 0x0E)
			status = cc_to_error[cc];

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
			if (td->hwCBP == 0)
				urb->actual_length += tdBE - td->data_dma + 1;
			else
				urb->actual_length +=
					  hc32_to_cpup (ohci, &td->hwCBP)
					- td->data_dma;
		}

		if (cc != TD_CC_NOERROR && cc < 0x0E)
			ohci_vdbg (ohci,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, 1 + td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}
	return status;
}
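
/* Editorial sketch (standalone): decoding an ISO packet status word as
 * td_done() does above.  The condition code sits in the top four bits;
 * the received byte count of an IN packet sits in the low bits (the
 * driver masks ten bits, enough for the 1023-byte full-speed ISO
 * maximum).  Helper names are illustrative.
 */
static unsigned demo_psw_cc(unsigned short psw)
{
	return (psw >> 12) & 0xf;	/* condition code */
}

static unsigned demo_psw_len(unsigned short psw)
{
	return psw & 0x3ff;		/* bytes received (IN) */
}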

/*-------------------------------------------------------------------------*/

static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
{
	struct urb *urb = td->urb;
	urb_priv_t *urb_priv = urb->hcpriv;
	struct ed *ed = td->ed;
	struct list_head *tmp = td->td_list.next;
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);

	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);

	/* Get rid of all later tds from this urb.  We don't have
	 * to be careful: no errors and nothing was transferred.
	 * Also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td *next;

		next = list_entry (tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */

		list_del(&next->td_list);
		urb_priv->td_cnt++;
		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting:  report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol (urb->pipe))
			break;
		/* fallthrough */
	default:
		ohci_dbg (ohci,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			hc32_to_cpu (ohci, td->hwINFO),
			cc, cc_to_error [cc]);
	}
}

/* replies to the request have to be on a FIFO basis so
 * we unreverse the hc-reversed done-list
 */
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
{
	u32 td_dma;
	struct td *td_rev = NULL;
	struct td *td = NULL;

	td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
	ohci->hcca->done_head = 0;
	wmb();

	/* get TD from hc's singly linked list, and
	 * prepend to ours.  ed->td_list changes later.
	 */
	while (td_dma) {
		int cc;

		td = dma_to_td (ohci, td_dma);
		if (!td) {
			ohci_err (ohci, "bad entry %8x\n", td_dma);
			break;
		}

		td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
		cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));

		/* Non-iso endpoints can halt on error; un-halt,
		 * and dequeue any other TDs from this urb.
		 * No other TD could have caused the halt.
		 */
		if (cc != TD_CC_NOERROR
				&& (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
			ed_halted(ohci, td, cc);

		td->next_dl_td = td_rev;
		td_rev = td;
		td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
	}
	return td_rev;
}
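
/* Editorial sketch (standalone): the in-place reversal performed by
 * dl_reverse_done_list().  The controller hands back completed TDs in
 * LIFO order; walking the chain once while re-pointing each link
 * restores FIFO order.  Types are illustrative.
 */
#include <stddef.h>

struct demo_item {
	struct demo_item *next;
};

static struct demo_item *demo_reverse(struct demo_item *head)
{
	struct demo_item *prev = NULL;

	while (head) {
		struct demo_item *next = head->next;

		head->next = prev;	/* re-point this link backwards */
		prev = head;
		head = next;
	}
	return prev;			/* new head: oldest entry first */
}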

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks (struct ohci_hcd *ohci, u16 tick)
{
	struct ed *ed, **last;

rescan_all:
	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head *entry, *tmp;
		int completed, modified;
		__hc32 *prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
			if (tick_before (tick, ed->tick)) {
skip_ed:
				last = &ed->ed_next;
				continue;
			}

			if (!list_empty (&ed->td_list)) {
				struct td *td;
				u32 head;

				td = list_entry (ed->td_list.next, struct td,
							td_list);
				head = hc32_to_cpu (ohci, ed->hwHeadP) &
								TD_MASK;

				/* INTR_WDH may need to clean up first */
				if (td->td_dma != head) {
					if (ed == ohci->ed_to_check)
						ohci->ed_to_check = NULL;
					else
						goto skip_ed;
				}
			}
		}

		/* reentrancy:  if we drop the schedule lock, someone might
		 * have modified this list.  normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_next;
		ed->ed_next = NULL;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed.  But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe (entry, tmp, &ed->td_list) {
			struct td *td;
			struct urb *urb;
			urb_priv_t *urb_priv;
			__hc32 savebits;
			u32 tdINFO;

			td = list_entry (entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			if (!urb->unlinked) {
				prev = &td->hwNextTD;
				continue;
			}

			/* patch pointer hc uses */
			savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* If this was unlinked, the TD may not have been
			 * retired ... so manually save the data toggle.
			 * The controller ignores the value we save for
			 * control and ISO endpoints.
			 */
			tdINFO = hc32_to_cpup(ohci, &td->hwINFO);
			if ((tdINFO & TD_T) == TD_T_DATA0)
				ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_C);
			else if ((tdINFO & TD_T) == TD_T_DATA1)
				ed->hwHeadP |= cpu_to_hc32(ohci, ED_C);

			/* HC may have partly processed this TD */
			td_done (ohci, urb, td);
			urb_priv->td_cnt++;

			/* if URB is done, clean up */
			if (urb_priv->td_cnt >= urb_priv->length) {
				modified = completed = 1;
				finish_urb(ohci, urb, 0);
			}
		}
		if (completed && !list_empty (&ed->td_list))
			goto rescan_this;

		/* ED's now officially unlinked, hc doesn't see */
		ed->state = ED_IDLE;
		if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
			ohci->eds_scheduled--;
		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
		ed->hwNextED = 0;
		wmb ();
		ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);

		/* but if there's work queued, reschedule */
		if (!list_empty (&ed->td_list)) {
			if (ohci->rh_state == OHCI_RH_RUNNING)
				ed_schedule (ohci, ed);
		}

		if (modified)
			goto rescan_all;
	}

	/* maybe reenable control and bulk lists */
	if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
		u32 command = 0, control = 0;

		if (ohci->ed_controltail) {
			command |= OHCI_CLF;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
				control |= OHCI_CTRL_CLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_controlcurrent);
			}
		}
		if (ohci->ed_bulktail) {
			command |= OHCI_BLF;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
				control |= OHCI_CTRL_BLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_bulkcurrent);
			}
		}

		/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
		if (control) {
			ohci->hc_control |= control;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		if (command) {
			if (quirk_zfmicro(ohci))
				mdelay(1);
			ohci_writel (ohci, command, &ohci->regs->cmdstatus);
		}
	}
}
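
/* Editorial sketch (standalone): the data-toggle salvage in the loop
 * above.  An unlinked TD that forced a specific DATA PID carries it in
 * the two-bit toggle field of its first dword (bits 24..25); folding
 * that value back into the ED's carry bit keeps the next queued TD on
 * the right DATA0/DATA1 sequence.  DEMO_* constants re-state the layout
 * for illustration only.
 */
#define DEMO_TD_T	(3u << 24)	/* TD toggle field */
#define DEMO_TD_DATA0	(2u << 24)	/* force DATA0 */
#define DEMO_TD_DATA1	(3u << 24)	/* force DATA1 */
#define DEMO_ED_C	(1u << 1)	/* ED toggle carry bit (hwHeadP) */

static unsigned demo_salvage_toggle(unsigned ed_headp, unsigned td_info)
{
	if ((td_info & DEMO_TD_T) == DEMO_TD_DATA0)
		return ed_headp & ~DEMO_ED_C;
	if ((td_info & DEMO_TD_T) == DEMO_TD_DATA1)
		return ed_headp | DEMO_ED_C;
	return ed_headp;	/* toggle comes from the carry: leave it */
}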


/*-------------------------------------------------------------------------*/

/*
 * Used to take back a TD from the host controller.  This would normally be
 * called from within dl_done_list, however it may be called directly if the
 * HC no longer sees the TD and it has not appeared on the donelist (after
 * two frames).  This bug has been observed on ZF Micro systems.
 */
static void takeback_td(struct ohci_hcd *ohci, struct td *td)
{
	struct urb *urb = td->urb;
	urb_priv_t *urb_priv = urb->hcpriv;
	struct ed *ed = td->ed;
	int status;

	/* update URB's length and status from TD */
	status = td_done(ohci, urb, td);
	urb_priv->td_cnt++;

	/* If all this urb's TDs are done, call complete() */
	if (urb_priv->td_cnt >= urb_priv->length)
		finish_urb(ohci, urb, status);

	/* clean schedule:  unlink EDs that are no longer busy */
	if (list_empty(&ed->td_list)) {
		if (ed->state == ED_OPER)
			start_ed_unlink(ohci, ed);

	/* ... reenabling halted EDs only after fault cleanup */
	} else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
			== cpu_to_hc32(ohci, ED_SKIP)) {
		td = list_entry(ed->td_list.next, struct td, td_list);
		if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
			ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
			/* ... hc may need waking-up */
			switch (ed->type) {
			case PIPE_CONTROL:
				ohci_writel(ohci, OHCI_CLF,
						&ohci->regs->cmdstatus);
				break;
			case PIPE_BULK:
				ohci_writel(ohci, OHCI_BLF,
						&ohci->regs->cmdstatus);
				break;
			}
		}
	}
}

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
 * instead of scanning the (re-reversed) donelist as this does.  There's
 * an abnormal path too, handling a quirk in some Compaq silicon:  URBs
 * with TDs that appear to be orphaned are directly reclaimed.
 */
static void
dl_done_list (struct ohci_hcd *ohci)
{
	struct td *td = dl_reverse_done_list (ohci);

	while (td) {
		struct td *td_next = td->next_dl_td;
		struct ed *ed = td->ed;

		/*
		 * Some OHCI controllers (NVIDIA for sure, maybe others)
		 * occasionally forget to add TDs to the done queue.  Since
		 * TDs for a given endpoint are always processed in order,
		 * if we find a TD on the donelist then all of its
		 * predecessors must be finished as well.
		 */
		for (;;) {
			struct td *td2;

			td2 = list_first_entry(&ed->td_list, struct td,
					td_list);
			if (td2 == td)
				break;
			takeback_td(ohci, td2);
		}

		takeback_td(ohci, td);
		td = td_next;
	}
}