/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}


/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	uhci->fsbr_is_on = 1;
	uhci->skel_term_qh->link = cpu_to_le32(
			uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	uhci->fsbr_is_on = 0;
	uhci->skel_term_qh->link = UHCI_PTR_TERM;
}
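
/*
 * In effect, uhci_fsbr_on() makes the schedule loop: the terminating
 * skeleton QH is pointed back at the full-speed control skeleton, so
 * the controller spends otherwise-idle bus time re-polling the
 * full-speed queues instead of stopping at the terminator.
 * uhci_fsbr_off() breaks the loop by restoring the terminating link.
 */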

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	if (!(urb->transfer_flags & URB_NO_FSBR))
		urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
	if (urbp->fsbr) {
		uhci->fsbr_is_wanted = 1;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
		else if (uhci->fsbr_expiring) {
			uhci->fsbr_expiring = 0;
			del_timer(&uhci->fsbr_timer);
		}
	}
}

static void uhci_fsbr_timeout(unsigned long _uhci)
{
	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}


static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->frame_cpu[framenum] = td;
	}
}
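
/* Note the ordering above: the new TD's own link pointer is filled in
 * before the wmb(), and only afterward is the TD made reachable by
 * writing the previous link. That way the controller, which follows
 * these pointers asynchronously, can never fetch a TD whose link field
 * is still uninitialized. */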

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
			qh->dummy_td = uhci_alloc_td(uhci);
			if (!qh->dummy_td) {
				dma_pool_free(uhci->qh_pool, qh, dma_handle);
				return NULL;
			}
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;

		if (qh->type == USB_ENDPOINT_XFER_INT ||
				qh->type == USB_ENDPOINT_XFER_ISOC)
			qh->load = usb_calc_bus_time(udev->speed,
					usb_endpoint_dir_in(&hep->desc),
					qh->type == USB_ENDPOINT_XFER_ISOC,
					le16_to_cpu(hep->desc.wMaxPacketSize))
				/ 1000 + 1;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		if (qh->dummy_td)
			uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}
	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		goto done;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes don't have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));

done:
	return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}
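
/* Illustration (hypothetical queue): suppose a bulk queue holds URB A
 * (3 TDs) and URB B (2 TDs), and A ends short after its second TD with
 * the controller expecting toggle 0 next.  If B's first TD already
 * carries toggle 0, only the running value is recomputed from B's last
 * TD; otherwise every TD in B gets its TD_TOKEN_TOGGLE bit flipped.
 * Either way the final value is written back with usb_settoggle(). */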

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = cpu_to_le32(td->dma_handle);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the end of the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &qh->skel->node);

	/* Link it into the schedule */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
	int highest_load = uhci->load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		highest_load = max_t(int, highest_load, uhci->load[phase]);
	return highest_load;
}

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int minimax_load;

	/* Find the optimal phase (unless it is already set) and get
	 * its load value. */
	if (qh->phase >= 0)
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
	else {
		int phase, load;
		int max_phase = min_t(int, MAX_PHASE, qh->period);

		qh->phase = 0;
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
		for (phase = 1; phase < max_phase; ++phase) {
			load = uhci_highest_load(uhci, phase, qh->period);
			if (load < minimax_load) {
				minimax_load = load;
				qh->phase = phase;
			}
		}
	}

	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
	if (minimax_load + qh->load > 900) {
		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
				"period %d, phase %d, %d + %d us\n",
				qh->period, qh->phase, minimax_load, qh->load);
		return -ENOSPC;
	}
	return 0;
}
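
/* Worked example with made-up numbers: for a QH with period 4 and
 * load 300 us, the candidate phases are 0..3.  If the worst frame
 * reachable from phase 0 (i.e. the max of load[0], load[4], load[8],
 * ...) is 500 us, from phase 1 is 200 us, and from phases 2 and 3 is
 * 400 us, then phase 1 is chosen and the check passes because
 * 200 + 300 <= 900. */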

/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] += load;
		uhci->total_load += load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 1;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"reserve", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] -= load;
		uhci->total_load -= load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 0;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"release", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	memset((void *)urbp, 0, sizeof(*urbp));

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	urbp->urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	return 0;
}

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}
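
	/* The XOR trick above swaps only the PID byte: for instance,
	 * 0x2D ^ (0x2D ^ 0xE1) = 0xE1, turning SETUP into OUT while the
	 * address and endpoint fields of "destination" stay untouched. */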

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = min(len, maxsze);

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		qh->skel = uhci->skel_ls_control_qh;
	else {
		qh->skel = uhci->skel_fs_control_qh;
		uhci_add_fsbr(uhci, urb);
	}

	urb->actual_length = -8;	/* Account for the SETUP packet */
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
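
/* Shape of the chain this builds, for a hypothetical 18-byte control
 * read with an 8-byte maxpacket:
 *
 *	SETUP(8, DATA0) -> IN(8, DATA1) -> IN(8, DATA0) -> IN(2, DATA1)
 *		-> STATUS OUT(0, DATA1) -> new dummy (inactive)
 *
 * The old dummy TD serves as the SETUP TD and is activated last, so
 * the controller never sees a partially built chain. */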

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = cpu_to_le32(td->dma_handle);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
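
/* Sizing example (illustrative): a 4096-byte bulk OUT with a 64-byte
 * maxpacket becomes 64 data TDs; with URB_ZERO_PACKET set, one more
 * zero-length TD follows, and in every case a fresh inactive dummy TD
 * is appended so the next submission has somewhere to start. */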

static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	qh->skel = uhci->skel_bulk_qh;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}

static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */

	if (!qh->bandwidth_reserved) {
		int exponent;

		/* Figure out which power-of-two queue to use */
		for (exponent = 7; exponent >= 0; --exponent) {
			if ((1 << exponent) <= urb->interval)
				break;
		}
		if (exponent < 0)
			return -EINVAL;
		qh->period = 1 << exponent;
		qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];

		/* For now, interrupt phase is fixed by the layout
		 * of the QH lists. */
		qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
		ret = uhci_check_bandwidth(uhci, qh);
		if (ret)
			return ret;
	} else if (qh->period > urb->interval)
		return -EINVAL;		/* Can't decrease the period */

	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0) {
		urb->interval = qh->period;
		if (!qh->bandwidth_reserved)
			uhci_reserve_bandwidth(uhci, qh);
	}
	return ret;
}
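
/* Example of the rounding (made-up interval): a driver asking for
 * urb->interval == 10 gets exponent 3, since 8 is the largest power
 * of two not above 10.  The QH then runs with period 8, phase
 * (8 / 2) & (MAX_PHASE - 1) == 4, and urb->interval is rewritten
 * to 8 on successful submission. */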

/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = cpu_to_le32(td->dma_handle);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}

/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(&urb->dev->dev,
						"%s: failed with status %x\n",
						__FUNCTION__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		} else if (len < uhci_expected_length(td_token(td))) {

			/* We received a short packet */
			if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;

			/* Fixup needed only if this isn't the URB's last TD */
			else if (&td->list != urbp->td_list.prev)
				ret = 1;
		}

		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* In case a control transfer gets an error
		 * during the setup stage */
		urb->actual_length = max(urb->actual_length, 0);

		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Values must not be too big (could overflow below) */
	if (urb->interval >= UHCI_NUMFRAMES ||
			urb->number_of_packets >= UHCI_NUMFRAMES)
		return -EFBIG;

	/* Check the period and figure out the starting frame number */
	if (!qh->bandwidth_reserved) {
		qh->period = urb->interval;
		if (urb->transfer_flags & URB_ISO_ASAP) {
			qh->phase = -1;		/* Find the best phase */
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;

			/* Allow a little time to allocate the TDs */
			uhci_get_current_frame_number(uhci);
			frame = uhci->frame_number + 10;

			/* Move forward to the first frame having the
			 * correct phase */
			urb->start_frame = frame + ((qh->phase - frame) &
					(qh->period - 1));
		} else {
			i = urb->start_frame - uhci->last_iso_frame;
			if (i <= 0 || i >= UHCI_NUMFRAMES)
				return -EINVAL;
			qh->phase = urb->start_frame & (qh->period - 1);
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;
		}

	} else if (qh->period != urb->interval) {
		return -EINVAL;		/* Can't change the period */

	} else {	/* Pick up where the last URB leaves off */
		if (list_empty(&qh->queue)) {
			frame = qh->iso_frame;
		} else {
			struct urb *lurb;

			lurb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			frame = lurb->start_frame +
					lurb->number_of_packets *
					lurb->interval;
		}
		if (urb->transfer_flags & URB_ISO_ASAP)
			urb->start_frame = frame;
		else if (urb->start_frame != frame)
			return -EINVAL;
	}
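
	/* Phase alignment, with made-up numbers: for qh->period == 8 and
	 * qh->phase == 3, a current frame number of 100 gives frame = 110
	 * and start_frame = 110 + ((3 - 110) & 7) = 110 + 5 = 115, the
	 * first frame at or after 110 whose number is congruent to 3
	 * modulo 8. */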

	/* Make sure we won't have to go too far into the future */
	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
			urb->start_frame + urb->number_of_packets *
				urb->interval))
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += qh->period;
	}

	if (list_empty(&qh->queue)) {
		qh->iso_packet_desc = &urb->iso_frame_desc[0];
		qh->iso_frame = urb->start_frame;
		qh->iso_status = 0;
	}

	qh->skel = uhci->skel_iso_qh;
	if (!qh->bandwidth_reserved)
		uhci_reserve_bandwidth(uhci, qh);
	return 0;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int status;
		int actlength;

		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
			return -EINPROGRESS;

		uhci_remove_tds_from_frame(uhci, qh->iso_frame);

		ctrlstat = td_status(td);
		if (ctrlstat & TD_CTRL_ACTIVE) {
			status = -EXDEV;	/* TD was added too late? */
		} else {
			status = uhci_map_status(uhci_status_bits(ctrlstat),
					usb_pipeout(urb->pipe));
			actlength = uhci_actual_length(ctrlstat);

			urb->actual_length += actlength;
			qh->iso_packet_desc->actual_length = actlength;
			qh->iso_packet_desc->status = status;
		}

		if (status) {
			urb->error_count++;
			qh->iso_status = status;
		}

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
		qh->iso_frame += qh->period;
		++qh->iso_packet_desc;
	}
	return qh->iso_status;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto done;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (hep->hcpriv)
		qh = (struct uhci_qh *) hep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, hep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = uhci_submit_interrupt(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		urb->error_count = 0;
		ret = uhci_submit_isochronous(uhci, urb, qh);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	urbp->qh = qh;
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_urbp_wants_fsbr(uhci, urbp);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */

err_no_qh:
	uhci_free_urb_priv(uhci, urbp);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	qh = urbp->qh;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		uhci_unlink_isochronous_tds(uhci, urb);
		mb();

		/* If the URB has already started, update the QH unlink time */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}

/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* When giving back the first URB in an Isochronous queue,
	 * reinitialize the QH's iso-related members for the next URB. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC &&
			urbp->node.prev == &qh->queue &&
			urbp->node.next != &qh->queue) {
		struct urb *nurb = list_entry(urbp->node.next,
				struct urb_priv, node)->urb;

		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
		qh->iso_frame = nurb->start_frame;
		qh->iso_status = 0;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);
		if (qh->bandwidth_reserved)
			uhci_release_bandwidth(uhci, qh);
	}
}

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		 uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
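
/* The macro's frame test works like this: uhci_unlink_qh() records the
 * frame number at the moment of unlinking.  One full frame later the
 * controller can no longer be touching the QH, so frame_number !=
 * unlink_frame means the unlink has completed.  While the controller
 * is stopped the frame counter never advances, so adding is_stopped
 * (0 or 1) forces the comparison to succeed in that case. */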
1da177e4 | 1427 | |
7d12e780 | 1428 | static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) |
1da177e4 | 1429 | { |
1da177e4 | 1430 | struct urb_priv *urbp; |
0ed8fee1 AS |
1431 | struct urb *urb; |
1432 | int status; | |
1da177e4 | 1433 | |
0ed8fee1 AS |
1434 | while (!list_empty(&qh->queue)) { |
1435 | urbp = list_entry(qh->queue.next, struct urb_priv, node); | |
1436 | urb = urbp->urb; | |
1da177e4 | 1437 | |
b1869000 | 1438 | if (qh->type == USB_ENDPOINT_XFER_ISOC) |
0ed8fee1 | 1439 | status = uhci_result_isochronous(uhci, urb); |
b1869000 | 1440 | else |
0ed8fee1 | 1441 | status = uhci_result_common(uhci, urb); |
0ed8fee1 AS |
1442 | if (status == -EINPROGRESS) |
1443 | break; | |
1da177e4 | 1444 | |
0ed8fee1 AS |
1445 | spin_lock(&urb->lock); |
1446 | if (urb->status == -EINPROGRESS) /* Not dequeued */ | |
1447 | urb->status = status; | |
1448 | else | |
2775562a | 1449 | status = ECONNRESET; /* Not -ECONNRESET */ |
0ed8fee1 | 1450 | spin_unlock(&urb->lock); |
1da177e4 | 1451 | |
0ed8fee1 AS |
1452 | /* Dequeued but completed URBs can't be given back unless |
1453 | * the QH is stopped or has finished unlinking. */ | |
2775562a AS |
1454 | if (status == ECONNRESET) { |
1455 | if (QH_FINISHED_UNLINKING(qh)) | |
1456 | qh->is_stopped = 1; | |
1457 | else if (!qh->is_stopped) | |
1458 | return; | |
1459 | } | |
1da177e4 | 1460 | |
7d12e780 | 1461 | uhci_giveback_urb(uhci, qh, urb); |
7ceb932f | 1462 | if (status < 0 && qh->type != USB_ENDPOINT_XFER_ISOC) |
0ed8fee1 AS |
1463 | break; |
1464 | } | |
1da177e4 | 1465 | |
0ed8fee1 AS |
1466 | /* If the QH is neither stopped nor finished unlinking (normal case), |
1467 | * our work here is done. */ | |
2775562a AS |
1468 | if (QH_FINISHED_UNLINKING(qh)) |
1469 | qh->is_stopped = 1; | |
1470 | else if (!qh->is_stopped) | |
0ed8fee1 | 1471 | return; |
1da177e4 | 1472 | |
0ed8fee1 | 1473 | /* Otherwise give back each of the dequeued URBs */ |
2775562a | 1474 | restart: |
0ed8fee1 AS |
1475 | list_for_each_entry(urbp, &qh->queue, node) { |
1476 | urb = urbp->urb; | |
1477 | if (urb->status != -EINPROGRESS) { | |
10b8e47d AS |
1478 | |
1479 | /* Fix up the TD links and save the toggles for | |
1480 | * non-Isochronous queues. For Isochronous queues, | |
1481 | * test for too-recent dequeues. */ | |
1482 | if (!uhci_cleanup_queue(uhci, qh, urb)) { | |
1483 | qh->is_stopped = 0; | |
1484 | return; | |
1485 | } | |
7d12e780 | 1486 | uhci_giveback_urb(uhci, qh, urb); |
0ed8fee1 AS |
1487 | goto restart; |
1488 | } | |
1489 | } | |
1490 | qh->is_stopped = 0; | |
1da177e4 | 1491 | |
0ed8fee1 AS |
1492 | /* There are no more dequeued URBs. If there are still URBs on the |
1493 | * queue, the QH can now be re-activated. */ | |
1494 | if (!list_empty(&qh->queue)) { | |
1495 | if (qh->needs_fixup) | |
1496 | uhci_fixup_toggles(qh, 0); | |
84afddd7 AS |
1497 | |
1498 | /* If the first URB on the queue wants FSBR but its time | |
1499 | * limit has expired, set the next TD to interrupt on | |
1500 | * completion before reactivating the QH. */ | |
1501 | urbp = list_entry(qh->queue.next, struct urb_priv, node); | |
1502 | if (urbp->fsbr && qh->wait_expired) { | |
1503 | struct uhci_td *td = list_entry(urbp->td_list.next, | |
1504 | struct uhci_td, list); | |
1505 | ||
1506 | td->status |= cpu_to_le32(TD_CTRL_IOC); | |
1507 | } | |
1508 | ||
0ed8fee1 | 1509 | uhci_activate_qh(uhci, qh); |
1da177e4 LT |
1510 | } |
1511 | ||
0ed8fee1 AS |
1512 | /* The queue is empty. The QH can become idle if it is fully |
1513 | * unlinked. */ | |
1514 | else if (QH_FINISHED_UNLINKING(qh)) | |
1515 | uhci_make_qh_idle(uhci, qh); | |
1da177e4 LT |
1516 | } |
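/*
 * Aside (illustrative sketch with a hypothetical helper name): the
 * "goto restart" idiom above.  uhci_giveback_urb() drops uhci->lock,
 * so the completion handler or another CPU may alter the queue before
 * the lock is retaken; continuing a list_for_each_entry() walk after
 * that would be unsafe.  Restarting from the head is safe because each
 * given-back URB has already been removed from the list.  The real
 * loop must also call uhci_cleanup_queue() first; that is omitted
 * here:
 */
static void example_giveback_dequeued(struct uhci_hcd *uhci,
		struct uhci_qh *qh)
{
	struct urb_priv *urbp;

restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		if (urbp->urb->status != -EINPROGRESS) {
			uhci_giveback_urb(uhci, qh, urbp->urb);
			goto restart;	/* the list may have changed */
		}
	}
}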
1517 | ||
84afddd7 AS |
1518 | /* |
1519 | * Check for queues that have made some forward progress. | |
1520 | * Returns 0 if the queue is not Isochronous, is ACTIVE, and | |
1521 | * has not advanced since last examined; 1 otherwise. | |
b761d9d8 AS |
1522 | * |
1523 | * Early Intel controllers have a bug that sometimes prevents qh->element | |
1524 | * from advancing when a TD completes successfully. The queue remains | |
1525 | * stuck on the inactive completed TD. We detect such cases and advance | |
1526 | * the element pointer by hand. | |
84afddd7 AS |
1527 | */ |
1528 | static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh) | |
1529 | { | |
1530 | struct urb_priv *urbp = NULL; | |
1531 | struct uhci_td *td; | |
1532 | int ret = 1; | |
1533 | unsigned status; | |
1534 | ||
1535 | if (qh->type == USB_ENDPOINT_XFER_ISOC) | |
c5e3b741 | 1536 | goto done; |
84afddd7 AS |
1537 | |
1538 | /* Treat an UNLINKING queue as though it hasn't advanced. | |
1539 | * This is okay because reactivation will treat it as though | |
1540 | * it has advanced, and if it is going to become IDLE then | |
1541 | * this doesn't matter anyway. Furthermore it's possible | |
1542 | * for an UNLINKING queue not to have any URBs at all, or | |
1543 | * for its first URB not to have any TDs (if it was dequeued | |
1544 | * just as it completed). So it's not easy in any case to | |
1545 | * test whether such queues have advanced. */ | |
1546 | if (qh->state != QH_STATE_ACTIVE) { | |
1547 | urbp = NULL; | |
1548 | status = 0; | |
1549 | ||
1550 | } else { | |
1551 | urbp = list_entry(qh->queue.next, struct urb_priv, node); | |
1552 | td = list_entry(urbp->td_list.next, struct uhci_td, list); | |
1553 | status = td_status(td); | |
1554 | if (!(status & TD_CTRL_ACTIVE)) { | |
1555 | ||
1556 | /* We're okay, the queue has advanced */ | |
1557 | qh->wait_expired = 0; | |
1558 | qh->advance_jiffies = jiffies; | |
c5e3b741 | 1559 | goto done; |
84afddd7 AS |
1560 | } |
1561 | ret = 0; | |
1562 | } | |
1563 | ||
1564 | /* The queue hasn't advanced; check for timeout */ | |
c5e3b741 AS |
1565 | if (qh->wait_expired) |
1566 | goto done; | |
1567 | ||
1568 | if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) { | |
b761d9d8 AS |
1569 | |
1570 | /* Detect the Intel bug and work around it */ | |
1571 | if (qh->post_td && qh_element(qh) == | |
1572 | cpu_to_le32(qh->post_td->dma_handle)) { | |
1573 | qh->element = qh->post_td->link; | |
1574 | qh->advance_jiffies = jiffies; | |
c5e3b741 AS |
1575 | ret = 1; |
1576 | goto done; | |
b761d9d8 AS |
1577 | } |
1578 | ||
84afddd7 AS |
1579 | qh->wait_expired = 1; |
1580 | ||
1581 | /* If the current URB wants FSBR, unlink it temporarily | |
1582 | * so that we can safely set the next TD to interrupt on | |
1583 | * completion. That way we'll know as soon as the queue | |
1584 | * starts moving again. */ | |
1585 | if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC)) | |
1586 | uhci_unlink_qh(uhci, qh); | |
c5e3b741 AS |
1587 | |
1588 | } else { | |
1589 | /* Unmoving but not-yet-expired queues keep FSBR alive */ | |
1590 | if (urbp) | |
1591 | uhci_urbp_wants_fsbr(uhci, urbp); | |
84afddd7 | 1592 | } |
c5e3b741 AS |
1593 | |
1594 | done: | |
84afddd7 AS |
1595 | return ret; |
1596 | } | |
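/*
 * Distilled view of the workaround above (for illustration only): the
 * queue is "stuck" when the hardware element pointer still refers to
 * the TD that completed most recently, i.e. when
 *
 *	qh_element(qh) == cpu_to_le32(qh->post_td->dma_handle)
 *
 * The fix copies qh->post_td->link into qh->element, performing by
 * hand the advance the controller should have made on its own.
 */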
1597 | ||
0ed8fee1 AS |
1598 | /* |
1599 | * Process events in the schedule, but only in one thread at a time | |
1600 | */ | |
7d12e780 | 1601 | static void uhci_scan_schedule(struct uhci_hcd *uhci) |
1da177e4 | 1602 | { |
0ed8fee1 AS |
1603 | int i; |
1604 | struct uhci_qh *qh; | |
1da177e4 LT |
1605 | |
1606 | /* Don't allow re-entrant calls */ | |
1607 | if (uhci->scan_in_progress) { | |
1608 | uhci->need_rescan = 1; | |
1609 | return; | |
1610 | } | |
1611 | uhci->scan_in_progress = 1; | |
84afddd7 | 1612 | rescan: |
1da177e4 | 1613 | uhci->need_rescan = 0; |
c5e3b741 | 1614 | uhci->fsbr_is_wanted = 0; |
1da177e4 | 1615 | |
6c1b445c | 1616 | uhci_clear_next_interrupt(uhci); |
1da177e4 | 1617 | uhci_get_current_frame_number(uhci); |
c8155cc5 | 1618 | uhci->cur_iso_frame = uhci->frame_number; |
1da177e4 | 1619 | |
0ed8fee1 AS |
1620 | /* Go through all the QH queues and process the URBs in each one */ |
1621 | for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) { | |
1622 | uhci->next_qh = list_entry(uhci->skelqh[i]->node.next, | |
1623 | struct uhci_qh, node); | |
1624 | while ((qh = uhci->next_qh) != uhci->skelqh[i]) { | |
1625 | uhci->next_qh = list_entry(qh->node.next, | |
1626 | struct uhci_qh, node); | |
84afddd7 AS |
1627 | |
1628 | if (uhci_advance_check(uhci, qh)) { | |
7d12e780 | 1629 | uhci_scan_qh(uhci, qh); |
c5e3b741 AS |
1630 | if (qh->state == QH_STATE_ACTIVE) { |
1631 | uhci_urbp_wants_fsbr(uhci, | |
1632 | list_entry(qh->queue.next, struct urb_priv, node)); | |
1633 | } | |
84afddd7 | 1634 | } |
0ed8fee1 | 1635 | } |
1da177e4 | 1636 | } |
1da177e4 | 1637 | |
c8155cc5 | 1638 | uhci->last_iso_frame = uhci->cur_iso_frame; |
1da177e4 LT |
1639 | if (uhci->need_rescan) |
1640 | goto rescan; | |
1641 | uhci->scan_in_progress = 0; | |
1642 | ||
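	/* FSBR is still on but no queue asked for it during this scan:
	 * start the delayed turn-off timer */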
c5e3b741 AS |
1643 | if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted && |
1644 | !uhci->fsbr_expiring) { | |
1645 | uhci->fsbr_expiring = 1; | |
1646 | mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY); | |
1647 | } | |
84afddd7 | 1648 | |
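	/* Keep taking frame interrupts only while QHs are still waiting
	 * to finish unlinking */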
04538a25 | 1649 | if (list_empty(&uhci->skel_unlink_qh->node)) |
1da177e4 LT |
1650 | uhci_clear_next_interrupt(uhci); |
1651 | else | |
1652 | uhci_set_next_interrupt(uhci); | |
1da177e4 | 1653 | } |
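/*
 * Aside (illustrative sketch, not driver code): the scan_in_progress/
 * need_rescan pair above implements single-threaded scanning under
 * uhci->lock without making callers wait.  A caller that finds a scan
 * already running merely records a rescan request; the active scanner
 * keeps looping until no new request has arrived.  The same idiom in
 * miniature (the pass body is stubbed out):
 */
static void example_single_scanner(struct uhci_hcd *uhci)
{
	/* Caller holds uhci->lock */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;	/* the active scanner will loop */
		return;
	}
	uhci->scan_in_progress = 1;
	do {
		uhci->need_rescan = 0;
		/* ... one full pass over the schedule goes here ... */
	} while (uhci->need_rescan);
	uhci->scan_in_progress = 0;
}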