Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Universal Host Controller Interface driver for USB. | |
3 | * | |
4 | * Maintainer: Alan Stern <stern@rowland.harvard.edu> | |
5 | * | |
6 | * (C) Copyright 1999 Linus Torvalds | |
7 | * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com | |
8 | * (C) Copyright 1999 Randy Dunlap | |
9 | * (C) Copyright 1999 Georg Acher, acher@in.tum.de | |
10 | * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de | |
11 | * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch | |
12 | * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at | |
13 | * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface | |
14 | * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). | |
15 | * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) | |
dccf4a48 | 16 | * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu |
1da177e4 LT |
17 | */ |
18 | ||
1da177e4 LT |
19 | static void uhci_free_pending_tds(struct uhci_hcd *uhci); |
20 | ||
/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	/* While the controller is stopped no TD will complete; kick the
	 * root-hub timer instead so the event gets processed promptly. */
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	/* Ask for an interrupt when the terminating TD is reached */
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}
35 | ||
/* Stop requesting an interrupt at the terminating TD; inverse of
 * uhci_set_next_interrupt(). */
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}
40 | ||
2532178a | 41 | static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci) |
1da177e4 LT |
42 | { |
43 | dma_addr_t dma_handle; | |
44 | struct uhci_td *td; | |
45 | ||
46 | td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle); | |
47 | if (!td) | |
48 | return NULL; | |
49 | ||
50 | td->dma_handle = dma_handle; | |
1da177e4 | 51 | td->frame = -1; |
1da177e4 LT |
52 | |
53 | INIT_LIST_HEAD(&td->list); | |
54 | INIT_LIST_HEAD(&td->remove_list); | |
55 | INIT_LIST_HEAD(&td->fl_list); | |
56 | ||
1da177e4 LT |
57 | return td; |
58 | } | |
59 | ||
/*
 * Return a TD to the DMA pool.  The TD must already be off every list
 * it can be on; the warnings catch callers that free a TD which is
 * still linked somewhere.
 */
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->remove_list))
		dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
71 | ||
/*
 * Fill in the three hardware-visible words of a TD, converting each
 * value to little-endian as the UHCI hardware requires.  The link
 * pointer is left untouched; callers set it separately.
 */
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
79 | ||
/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);	/* frame list wraps around */

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		/* Append after the last TD already in this frame:
		 * ftd = first TD on the frame's list, ltd = last TD. */
		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		/* The new TD inherits the old tail's link; the wmb()
		 * makes it fully valid before the controller can see it. */
		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		/* Frame slot is empty: link the TD in front of whatever
		 * the frame pointer currently addresses.  Again the wmb()
		 * orders the TD setup before it becomes reachable. */
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->frame_cpu[framenum] = td;
	}
}
109 | ||
/*
 * Take an Isochronous TD back out of the frame list (inverse of
 * uhci_insert_td_in_frame_list()).
 */
static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		/* td is the first TD in its frame */
		if (list_empty(&td->fl_list)) {
			/* ... and the only one: empty the frame slot */
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			/* Promote the next TD to be the frame's first */
			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		/* Middle/tail of the frame chain: make the predecessor
		 * point past us */
		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;		/* mark as no longer inserted */
}
140 | ||
/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	/* Only the frame-list linkage is undone here; the TDs stay on
	 * the URB's td_list, so plain iteration is safe. */
	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
	/* Make the unlinks globally visible before the TDs are reused */
	wmb();
}
153 | ||
dccf4a48 AS |
154 | static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, |
155 | struct usb_device *udev, struct usb_host_endpoint *hep) | |
1da177e4 LT |
156 | { |
157 | dma_addr_t dma_handle; | |
158 | struct uhci_qh *qh; | |
159 | ||
160 | qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle); | |
161 | if (!qh) | |
162 | return NULL; | |
163 | ||
164 | qh->dma_handle = dma_handle; | |
165 | ||
166 | qh->element = UHCI_PTR_TERM; | |
167 | qh->link = UHCI_PTR_TERM; | |
168 | ||
dccf4a48 AS |
169 | INIT_LIST_HEAD(&qh->queue); |
170 | INIT_LIST_HEAD(&qh->node); | |
1da177e4 | 171 | |
dccf4a48 | 172 | if (udev) { /* Normal QH */ |
af0bb599 AS |
173 | qh->dummy_td = uhci_alloc_td(uhci); |
174 | if (!qh->dummy_td) { | |
175 | dma_pool_free(uhci->qh_pool, qh, dma_handle); | |
176 | return NULL; | |
177 | } | |
dccf4a48 AS |
178 | qh->state = QH_STATE_IDLE; |
179 | qh->hep = hep; | |
180 | qh->udev = udev; | |
181 | hep->hcpriv = qh; | |
1da177e4 | 182 | |
dccf4a48 AS |
183 | } else { /* Skeleton QH */ |
184 | qh->state = QH_STATE_ACTIVE; | |
185 | qh->udev = NULL; | |
186 | } | |
1da177e4 LT |
187 | return qh; |
188 | } | |
189 | ||
/*
 * Release a QH (normal or skeleton).  A normal QH must be IDLE; it is
 * detached from its endpoint and its dummy TD is freed along with it.
 */
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	/* Only an idle QH may be freed (skeleton QHs have no udev) */
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
203 | ||
/*
 * When the currently executing URB is dequeued, save its current toggle value
 */
static void uhci_save_toggle(struct uhci_qh *qh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM ||
			qh->queue.next != &urbp->node)
		return;
	qh->element = UHCI_PTR_TERM;

	/* Only bulk and interrupt pipes have to worry about toggles */
	if (!(usb_pipetype(urb->pipe) == PIPE_BULK ||
			usb_pipetype(urb->pipe) == PIPE_INTERRUPT))
		return;

	/* Find the first active TD; that's the device's toggle state */
	list_for_each_entry(td, &urbp->td_list, list) {
		if (td_status(td) & TD_CTRL_ACTIVE) {
			qh->needs_fixup = 1;
			qh->initial_toggle = uhci_toggle(td_token(td));
			return;
		}
	}

	/* A currently-executing URB should have had an active TD */
	WARN_ON(1);
}
235 | ||
236 | /* | |
237 | * Fix up the data toggles for URBs in a queue, when one of them | |
238 | * terminates early (short transfer, error, or dequeued). | |
239 | */ | |
240 | static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) | |
241 | { | |
242 | struct urb_priv *urbp = NULL; | |
243 | struct uhci_td *td; | |
244 | unsigned int toggle = qh->initial_toggle; | |
245 | unsigned int pipe; | |
246 | ||
247 | /* Fixups for a short transfer start with the second URB in the | |
248 | * queue (the short URB is the first). */ | |
249 | if (skip_first) | |
250 | urbp = list_entry(qh->queue.next, struct urb_priv, node); | |
251 | ||
252 | /* When starting with the first URB, if the QH element pointer is | |
253 | * still valid then we know the URB's toggles are okay. */ | |
254 | else if (qh_element(qh) != UHCI_PTR_TERM) | |
255 | toggle = 2; | |
256 | ||
257 | /* Fix up the toggle for the URBs in the queue. Normally this | |
258 | * loop won't run more than once: When an error or short transfer | |
259 | * occurs, the queue usually gets emptied. */ | |
1393adb2 | 260 | urbp = list_prepare_entry(urbp, &qh->queue, node); |
0ed8fee1 AS |
261 | list_for_each_entry_continue(urbp, &qh->queue, node) { |
262 | ||
263 | /* If the first TD has the right toggle value, we don't | |
264 | * need to change any toggles in this URB */ | |
265 | td = list_entry(urbp->td_list.next, struct uhci_td, list); | |
266 | if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) { | |
267 | td = list_entry(urbp->td_list.next, struct uhci_td, | |
268 | list); | |
269 | toggle = uhci_toggle(td_token(td)) ^ 1; | |
270 | ||
271 | /* Otherwise all the toggles in the URB have to be switched */ | |
272 | } else { | |
273 | list_for_each_entry(td, &urbp->td_list, list) { | |
274 | td->token ^= __constant_cpu_to_le32( | |
275 | TD_TOKEN_TOGGLE); | |
276 | toggle ^= 1; | |
277 | } | |
278 | } | |
279 | } | |
280 | ||
281 | wmb(); | |
282 | pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe; | |
283 | usb_settoggle(qh->udev, usb_pipeendpoint(pipe), | |
284 | usb_pipeout(pipe), toggle); | |
285 | qh->needs_fixup = 0; | |
286 | } | |
287 | ||
/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = cpu_to_le32(td->dma_handle);
	}

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the end of the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &qh->skel->node);

	/* Link it into the schedule: copy the predecessor's link first,
	 * then (after the wmb) point the predecessor at us, so the
	 * controller never sees a half-initialized QH. */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
}
325 | ||
/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it.
	 * The mb() orders the pointer update before the frame-number
	 * sample below. */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}
356 | ||
/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	/* Keep the scan cursor valid if it currently points at this QH */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}
377 | ||
dccf4a48 AS |
378 | static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, |
379 | struct urb *urb) | |
1da177e4 LT |
380 | { |
381 | struct urb_priv *urbp; | |
382 | ||
383 | urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC); | |
384 | if (!urbp) | |
385 | return NULL; | |
386 | ||
387 | memset((void *)urbp, 0, sizeof(*urbp)); | |
388 | ||
1da177e4 | 389 | urbp->urb = urb; |
dccf4a48 | 390 | urb->hcpriv = urbp; |
1da177e4 | 391 | |
dccf4a48 | 392 | INIT_LIST_HEAD(&urbp->node); |
1da177e4 | 393 | INIT_LIST_HEAD(&urbp->td_list); |
1da177e4 | 394 | |
1da177e4 LT |
395 | return urbp; |
396 | } | |
397 | ||
/* Append @td to the tail of @urb's private TD list. */
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	list_add_tail(&td->list, &urbp->td_list);
}
404 | ||
405 | static void uhci_remove_td_from_urb(struct uhci_td *td) | |
406 | { | |
407 | if (list_empty(&td->list)) | |
408 | return; | |
409 | ||
410 | list_del_init(&td->list); | |
1da177e4 LT |
411 | } |
412 | ||
/*
 * Release an URB's private data.  The URB's TDs are not freed directly;
 * they are moved to the HCD-wide remove list so they aren't reclaimed
 * until the controller can no longer be using them.
 */
static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	/* If the pending remove list was filled in an earlier frame it
	 * has aged enough; flush it now and restamp the age. */
	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
		uhci_free_pending_tds(uhci);
		uhci->td_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TDs. */
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	/* Queue this URB's TDs for deferred freeing */
	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urb(td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	urbp->urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}
441 | ||
442 | static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb) | |
443 | { | |
444 | struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; | |
445 | ||
446 | if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) { | |
447 | urbp->fsbr = 1; | |
448 | if (!uhci->fsbr++ && !uhci->fsbrtimeout) | |
449 | uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH; | |
450 | } | |
451 | } | |
452 | ||
453 | static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb) | |
454 | { | |
455 | struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; | |
456 | ||
457 | if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) { | |
458 | urbp->fsbr = 0; | |
459 | if (!--uhci->fsbr) | |
460 | uhci->fsbrtimeout = jiffies + FSBR_DELAY; | |
461 | } | |
462 | } | |
463 | ||
464 | /* | |
465 | * Map status to standard result codes | |
466 | * | |
467 | * <status> is (td_status(td) & 0xF60000), a.k.a. | |
468 | * uhci_status_bits(td_status(td)). | |
469 | * Note: <status> does not include the TD_CTRL_NAK bit. | |
470 | * <dir_out> is True for output TDs and False for input TDs. | |
471 | */ | |
472 | static int uhci_map_status(int status, int dir_out) | |
473 | { | |
474 | if (!status) | |
475 | return 0; | |
476 | if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */ | |
477 | return -EPROTO; | |
478 | if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */ | |
479 | if (dir_out) | |
480 | return -EPROTO; | |
481 | else | |
482 | return -EILSEQ; | |
483 | } | |
484 | if (status & TD_CTRL_BABBLE) /* Babble */ | |
485 | return -EOVERFLOW; | |
486 | if (status & TD_CTRL_DBUFERR) /* Buffer error */ | |
487 | return -ENOSR; | |
488 | if (status & TD_CTRL_STALLED) /* Stalled */ | |
489 | return -EPIPE; | |
490 | WARN_ON(status & TD_CTRL_ACTIVE); /* Active */ | |
491 | return 0; | |
492 | } | |
493 | ||
/*
 * Control transfers
 *
 * Builds the SETUP / DATA / STATUS TD chain for @urb on @qh.  The
 * queue's dummy TD becomes the SETUP TD; a new inactive dummy is
 * appended and the old one is activated last (after a wmb), so the
 * controller never sees a partially built chain.  Returns 0 on
 * success or -ENOMEM if a TD allocation fails.
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	/* All following TDs are created active */
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1). Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = min(len, maxsze);

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	/* Interrupt when the status stage completes */
	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		qh->skel = uhci->skel_ls_control_qh;
	else {
		qh->skel = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urb(qh->dummy_td);
	return -ENOMEM;
}
618 | ||
619 | /* | |
620 | * If control-IN transfer was short, the status packet wasn't sent. | |
621 | * This routine changes the element pointer in the QH to point at the | |
622 | * status TD. It's safe to do this even while the QH is live, because | |
623 | * the hardware only updates the element pointer following a successful | |
624 | * transfer. The inactive TD for the short packet won't cause an update, | |
625 | * so the pointer won't get overwritten. The next time the controller | |
626 | * sees this QH, it will send the status packet. | |
627 | */ | |
628 | static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb) | |
629 | { | |
630 | struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; | |
631 | struct uhci_td *td; | |
632 | ||
dccf4a48 | 633 | urbp->short_transfer = 1; |
1da177e4 LT |
634 | |
635 | td = list_entry(urbp->td_list.prev, struct uhci_td, list); | |
636 | urbp->qh->element = cpu_to_le32(td->dma_handle); | |
637 | ||
638 | return -EINPROGRESS; | |
639 | } | |
640 | ||
641 | ||
/*
 * Check the progress of a control URB.  Returns 0 when the transfer is
 * complete, -EINPROGRESS while TDs are still active, or a negative
 * error code; on error the QH is stopped (element set to UHCI_PTR_TERM
 * and is_stopped set).
 */
static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	head = &urbp->td_list;
	if (urbp->short_transfer) {
		/* A short control-IN was already retriggered; only the
		 * status TD (last on the list) remains to be checked. */
		tmp = head->prev;
		goto status_stage;
	}

	urb->actual_length = 0;

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP stage, check the status, but skip */
	/* the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	/* The rest of the TDs (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		unsigned int ctrlstat;

		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->next;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			/* Short is OK: skip straight to the status stage */
			return usb_control_retrigger_status(uhci, urb);
		}
	}

status_stage:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status stage */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/* we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
			status & TD_CTRL_ACTIVE &&
			status & TD_CTRL_NAK)
		return 0;
#endif

	status = uhci_status_bits(status);
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
			lprintk(errbuf);
		}
	}

	/* Note that the queue has stopped */
	urbp->qh->element = UHCI_PTR_TERM;
	urbp->qh->is_stopped = 1;
	return ret;
}
746 | ||
/*
 * Common submit for bulk and interrupt
 *
 * Builds the DATA TD chain for @urb on @qh, reusing the queue's dummy
 * TD as the first data TD.  A new inactive dummy is appended and the
 * old one is activated last (after a wmb), so the controller never
 * sees a partially built chain.  Returns 0 on success, -EINVAL for a
 * negative length, or -ENOMEM if a TD allocation fails.
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	/* Continue from the endpoint's current toggle state */
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;		/* NULL on the first pass: reuse the dummy TD */
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = cpu_to_le32(td->dma_handle);
		}
		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		/* TDs after the (still inactive) dummy are created active */
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Record the toggle the endpoint will end up with */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urb(qh->dummy_td);
	return -ENOMEM;
}
864 | ||
/*
 * Common completion handling for bulk and interrupt URBs.
 *
 * Walks the URB's TD list, accumulating the actual length transferred.
 * Returns -EINPROGRESS if any TD is still active, 0 on full success,
 * or a mapped error code on a hardware-reported failure or a
 * short-not-ok short transfer.  On error the QH is marked stopped and
 * flagged for a toggle fixup.
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	list_for_each_entry(td, &urbp->td_list, list) {
		/* Snapshot the status word once; the HC owns active TDs */
		unsigned int ctrlstat = td_status(td);

		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		/* Any remaining status bit is a hardware error */
		if (status)
			goto td_error;

		/* Short packet: the device sent less than we asked for */
		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			/*
			 * This URB stopped short of its end.  We have to
			 * fix up the toggles of the following URBs on the
			 * queue and restart the queue.
			 *
			 * Do this only the first time we encounter the
			 * short URB.
			 */
			if (!urbp->short_transfer) {
				urbp->short_transfer = 1;
				urbp->qh->initial_toggle =
						uhci_toggle(td_token(td)) ^ 1;
				uhci_fixup_toggles(urbp->qh, 1);

				/* Restart the queue at this URB's last TD
				 * so the HC skips the unused remainder */
				td = list_entry(urbp->td_list.prev,
						struct uhci_td, list);
				urbp->qh->element = td->link;
			}
			break;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (debug > 1 && errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
			lprintk(errbuf);
		}
	}

err:
	/* Note that the queue has stopped and save the next toggle value.
	 * On -EREMOTEIO the failing TD actually completed, so the next
	 * toggle is the opposite of the one it carried. */
	urbp->qh->element = UHCI_PTR_TERM;
	urbp->qh->is_stopped = 1;
	urbp->qh->needs_fixup = 1;
	urbp->qh->initial_toggle = uhci_toggle(td_token(td)) ^
			(ret == -EREMOTEIO);
	return ret;
}
944 | ||
dccf4a48 AS |
945 | static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, |
946 | struct uhci_qh *qh) | |
1da177e4 LT |
947 | { |
948 | int ret; | |
949 | ||
950 | /* Can't have low-speed bulk transfers */ | |
951 | if (urb->dev->speed == USB_SPEED_LOW) | |
952 | return -EINVAL; | |
953 | ||
dccf4a48 AS |
954 | qh->skel = uhci->skel_bulk_qh; |
955 | ret = uhci_submit_common(uhci, urb, qh); | |
956 | if (ret == 0) | |
1da177e4 | 957 | uhci_inc_fsbr(uhci, urb); |
1da177e4 LT |
958 | return ret; |
959 | } | |
960 | ||
dccf4a48 AS |
961 | static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, |
962 | struct uhci_qh *qh) | |
1da177e4 | 963 | { |
dccf4a48 AS |
964 | /* USB 1.1 interrupt transfers only involve one packet per interval. |
965 | * Drivers can submit URBs of any length, but longer ones will need | |
966 | * multiple intervals to complete. | |
1da177e4 | 967 | */ |
dccf4a48 AS |
968 | qh->skel = uhci->skelqh[__interval_to_skel(urb->interval)]; |
969 | return uhci_submit_common(uhci, urb, qh); | |
1da177e4 LT |
970 | } |
971 | ||
/*
 * Isochronous transfers
 *
 * Builds one TD per packet, picks a start frame (either ASAP or the
 * caller-supplied urb->start_frame), and links the TDs directly into
 * the controller's frame list.  Returns 0 on success, -EFBIG if the
 * URB has too many packets, or -ENOMEM on TD allocation failure.
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	/* IOS makes the HC retire the TD even if it gets an error */
	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* Figure out the starting frame number */
	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (list_empty(&qh->queue)) {
			uhci_get_current_frame_number(uhci);
			/* +10 frames of slack — presumably to cover
			 * scheduling latency; TODO confirm */
			urb->start_frame = (uhci->frame_number + 10);

		} else {		/* Go right after the last one */
			struct urb *last_urb;

			last_urb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			urb->start_frame = (last_urb->start_frame +
					last_urb->number_of_packets *
					last_urb->interval);
		}
	} else {
		/* FIXME: Sanity check */
	}
	urb->start_frame &= (UHCI_NUMFRAMES - 1);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			/* NOTE(review): TDs already allocated stay on
			 * urbp->td_list; presumably freed when the caller
			 * releases urbp — verify against uhci_urb_enqueue */
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	qh->skel = uhci->skel_iso_qh;

	/* Add the TDs to the frame list, one per interval */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += urb->interval;
	}

	return 0;
}
1035 | ||
1036 | static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb) | |
1037 | { | |
1038 | struct uhci_td *td; | |
1039 | struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; | |
1040 | int status; | |
1041 | int i, ret = 0; | |
1042 | ||
b81d3436 | 1043 | urb->actual_length = urb->error_count = 0; |
1da177e4 LT |
1044 | |
1045 | i = 0; | |
1046 | list_for_each_entry(td, &urbp->td_list, list) { | |
1047 | int actlength; | |
1048 | unsigned int ctrlstat = td_status(td); | |
1049 | ||
1050 | if (ctrlstat & TD_CTRL_ACTIVE) | |
1051 | return -EINPROGRESS; | |
1052 | ||
1053 | actlength = uhci_actual_length(ctrlstat); | |
1054 | urb->iso_frame_desc[i].actual_length = actlength; | |
1055 | urb->actual_length += actlength; | |
1056 | ||
1057 | status = uhci_map_status(uhci_status_bits(ctrlstat), | |
1058 | usb_pipeout(urb->pipe)); | |
1059 | urb->iso_frame_desc[i].status = status; | |
1060 | if (status) { | |
1061 | urb->error_count++; | |
1062 | ret = status; | |
1063 | } | |
1064 | ||
1065 | i++; | |
1066 | } | |
1067 | ||
1068 | return ret; | |
1069 | } | |
1070 | ||
1da177e4 | 1071 | static int uhci_urb_enqueue(struct usb_hcd *hcd, |
dccf4a48 | 1072 | struct usb_host_endpoint *hep, |
55016f10 | 1073 | struct urb *urb, gfp_t mem_flags) |
1da177e4 LT |
1074 | { |
1075 | int ret; | |
1076 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | |
1077 | unsigned long flags; | |
dccf4a48 AS |
1078 | struct urb_priv *urbp; |
1079 | struct uhci_qh *qh; | |
1da177e4 LT |
1080 | int bustime; |
1081 | ||
1082 | spin_lock_irqsave(&uhci->lock, flags); | |
1083 | ||
1084 | ret = urb->status; | |
1085 | if (ret != -EINPROGRESS) /* URB already unlinked! */ | |
dccf4a48 | 1086 | goto done; |
1da177e4 | 1087 | |
dccf4a48 AS |
1088 | ret = -ENOMEM; |
1089 | urbp = uhci_alloc_urb_priv(uhci, urb); | |
1090 | if (!urbp) | |
1091 | goto done; | |
1da177e4 | 1092 | |
dccf4a48 AS |
1093 | if (hep->hcpriv) |
1094 | qh = (struct uhci_qh *) hep->hcpriv; | |
1095 | else { | |
1096 | qh = uhci_alloc_qh(uhci, urb->dev, hep); | |
1097 | if (!qh) | |
1098 | goto err_no_qh; | |
1da177e4 | 1099 | } |
dccf4a48 | 1100 | urbp->qh = qh; |
1da177e4 LT |
1101 | |
1102 | switch (usb_pipetype(urb->pipe)) { | |
1103 | case PIPE_CONTROL: | |
dccf4a48 AS |
1104 | ret = uhci_submit_control(uhci, urb, qh); |
1105 | break; | |
1106 | case PIPE_BULK: | |
1107 | ret = uhci_submit_bulk(uhci, urb, qh); | |
1da177e4 LT |
1108 | break; |
1109 | case PIPE_INTERRUPT: | |
dccf4a48 | 1110 | if (list_empty(&qh->queue)) { |
1da177e4 LT |
1111 | bustime = usb_check_bandwidth(urb->dev, urb); |
1112 | if (bustime < 0) | |
1113 | ret = bustime; | |
1114 | else { | |
dccf4a48 AS |
1115 | ret = uhci_submit_interrupt(uhci, urb, qh); |
1116 | if (ret == 0) | |
1da177e4 LT |
1117 | usb_claim_bandwidth(urb->dev, urb, bustime, 0); |
1118 | } | |
1119 | } else { /* inherit from parent */ | |
dccf4a48 AS |
1120 | struct urb_priv *eurbp; |
1121 | ||
1122 | eurbp = list_entry(qh->queue.prev, struct urb_priv, | |
1123 | node); | |
1124 | urb->bandwidth = eurbp->urb->bandwidth; | |
1125 | ret = uhci_submit_interrupt(uhci, urb, qh); | |
1da177e4 LT |
1126 | } |
1127 | break; | |
1da177e4 LT |
1128 | case PIPE_ISOCHRONOUS: |
1129 | bustime = usb_check_bandwidth(urb->dev, urb); | |
1130 | if (bustime < 0) { | |
1131 | ret = bustime; | |
1132 | break; | |
1133 | } | |
1134 | ||
dccf4a48 AS |
1135 | ret = uhci_submit_isochronous(uhci, urb, qh); |
1136 | if (ret == 0) | |
1da177e4 LT |
1137 | usb_claim_bandwidth(urb->dev, urb, bustime, 1); |
1138 | break; | |
1139 | } | |
dccf4a48 AS |
1140 | if (ret != 0) |
1141 | goto err_submit_failed; | |
1da177e4 | 1142 | |
dccf4a48 AS |
1143 | /* Add this URB to the QH */ |
1144 | urbp->qh = qh; | |
1145 | list_add_tail(&urbp->node, &qh->queue); | |
1da177e4 | 1146 | |
dccf4a48 AS |
1147 | /* If the new URB is the first and only one on this QH then either |
1148 | * the QH is new and idle or else it's unlinked and waiting to | |
1149 | * become idle, so we can activate it right away. */ | |
1150 | if (qh->queue.next == &urbp->node) | |
1151 | uhci_activate_qh(uhci, qh); | |
dccf4a48 AS |
1152 | goto done; |
1153 | ||
1154 | err_submit_failed: | |
1155 | if (qh->state == QH_STATE_IDLE) | |
1156 | uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */ | |
1da177e4 | 1157 | |
dccf4a48 AS |
1158 | err_no_qh: |
1159 | uhci_free_urb_priv(uhci, urbp); | |
1160 | ||
1161 | done: | |
1da177e4 LT |
1162 | spin_unlock_irqrestore(&uhci->lock, flags); |
1163 | return ret; | |
1164 | } | |
1165 | ||
0ed8fee1 AS |
1166 | static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) |
1167 | { | |
1168 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | |
1169 | unsigned long flags; | |
1170 | struct urb_priv *urbp; | |
1171 | ||
1172 | spin_lock_irqsave(&uhci->lock, flags); | |
1173 | urbp = urb->hcpriv; | |
1174 | if (!urbp) /* URB was never linked! */ | |
1175 | goto done; | |
1176 | ||
1177 | /* Remove Isochronous TDs from the frame list ASAP */ | |
1178 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) | |
1179 | uhci_unlink_isochronous_tds(uhci, urb); | |
1180 | uhci_unlink_qh(uhci, urbp->qh); | |
1181 | ||
1182 | done: | |
1183 | spin_unlock_irqrestore(&uhci->lock, flags); | |
1184 | return 0; | |
1185 | } | |
1186 | ||
/*
 * Finish unlinking an URB and give it back
 *
 * Detaches the URB's TDs from the schedule, removes the URB from its
 * QH's queue, releases its bandwidth reservation where appropriate,
 * and hands the URB back to the USB core.  Drops and reacquires
 * uhci->lock around the giveback callback.
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Isochronous TDs get unlinked directly from the frame list */
	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
		uhci_unlink_isochronous_tds(uhci, urb);

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB. */
	else if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd, *ltd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		ltd = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		/* Bypass this URB's TDs in the hardware schedule */
		ptd->link = ltd->link;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */
	uhci_free_urb_priv(uhci, urbp);

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&qh->queue) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		break;
	}

	/* The completion callback may resubmit URBs, so drop the lock
	 * while the core calls it */
	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, regs);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);

		/* Bandwidth stuff not yet implemented */
	}
}
1da177e4 | 1257 | |
/*
 * Scan the URBs in a QH's queue
 */
/* True once an unlinking QH has been off the schedule for at least one
 * frame (or the controller is stopped), so the HC can no longer touch
 * it.  NOTE: references the local variable 'uhci' at the call site.
 * Fix: the 'qh' argument and the expansion are now fully parenthesized
 * so invocations with non-trivial expressions expand correctly. */
#define QH_FINISHED_UNLINKING(qh) \
		(((qh)->state == QH_STATE_UNLINKING && \
		uhci->frame_number + uhci->is_stopped != (qh)->unlink_frame))
1da177e4 | 1264 | |
/*
 * Scan one QH's queue: give back every URB that has completed, handle
 * dequeued URBs once the QH is safely stopped or unlinked, and then
 * either reactivate the QH (URBs remain) or let it go idle (queue
 * empty and fully unlinked).
 */
static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct pt_regs *regs)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		switch (usb_pipetype(urb->pipe)) {
		case PIPE_CONTROL:
			status = uhci_result_control(uhci, urb);
			break;
		case PIPE_ISOCHRONOUS:
			status = uhci_result_isochronous(uhci, urb);
			break;
		default:	/* PIPE_BULK or PIPE_INTERRUPT */
			status = uhci_result_common(uhci, urb);
			break;
		}
		/* The head URB hasn't finished; neither has anything
		 * behind it */
		if (status == -EINPROGRESS)
			break;

		spin_lock(&urb->lock);
		if (urb->status == -EINPROGRESS)	/* Not dequeued */
			urb->status = status;
		else
			status = -ECONNRESET;
		spin_unlock(&urb->lock);

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (status == -ECONNRESET &&
				!(qh->is_stopped || QH_FINISHED_UNLINKING(qh)))
			return;

		uhci_giveback_urb(uhci, qh, urb, regs);
		if (qh->is_stopped)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
restart:
	if (!(qh->is_stopped || QH_FINISHED_UNLINKING(qh)))
		return;

	/* Otherwise give back each of the dequeued URBs.  Restart the
	 * list walk after every giveback since it drops uhci->lock and
	 * modifies the queue. */
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->status != -EINPROGRESS) {
			uhci_save_toggle(qh, urb);
			uhci_giveback_urb(uhci, qh, urb, regs);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);
		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}
1338 | ||
1da177e4 LT |
1339 | static void uhci_free_pending_tds(struct uhci_hcd *uhci) |
1340 | { | |
1341 | struct uhci_td *td, *tmp; | |
1342 | ||
1343 | list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) { | |
1344 | list_del_init(&td->remove_list); | |
1345 | ||
1346 | uhci_free_td(uhci, td); | |
1347 | } | |
1348 | } | |
1349 | ||
/*
 * Process events in the schedule, but only in one thread at a time
 *
 * Frees TDs that have aged off the schedule, scans every skeleton
 * QH's list of endpoint QHs, and finally decides whether the
 * terminating TD's IOC interrupt is still needed.  A concurrent call
 * just sets need_rescan and returns; the active scanner loops until
 * no rescan is pending.
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
 rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);

	/* TDs queued for removal before the current frame are now safe
	 * to free (the HC has moved past them, or is stopped) */
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);

	/* Go through all the QH queues and process the URBs in each one.
	 * next_qh is kept one step ahead so uhci_scan_qh() may safely
	 * unlink the QH it is handed. */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);
			uhci_scan_qh(uhci, qh, regs);
		}
	}

	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	/* If the controller is stopped, we can finish these off right now */
	if (uhci->is_stopped)
		uhci_free_pending_tds(uhci);

	/* Keep the IOC interrupt armed only while there is cleanup work
	 * left to age out */
	if (list_empty(&uhci->td_remove_list) &&
			list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}
f5946f82 AS |
1398 | |
1399 | static void check_fsbr(struct uhci_hcd *uhci) | |
1400 | { | |
0ed8fee1 AS |
1401 | /* For now, don't scan URBs for FSBR timeouts. |
1402 | * Add it back in later... */ | |
f5946f82 AS |
1403 | |
1404 | /* Really disable FSBR */ | |
1405 | if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) { | |
1406 | uhci->fsbrtimeout = 0; | |
1407 | uhci->skel_term_qh->link = UHCI_PTR_TERM; | |
1408 | } | |
1409 | } |