/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu
 */

static void uhci_free_pending_tds(struct uhci_hcd *uhci);

/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}

static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;

	td->link = UHCI_PTR_TERM;
	td->buffer = 0;

	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->remove_list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->remove_list))
		dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
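		/* Make sure the new TD's link pointer is written out
		 * before the controller can reach the TD via ltd->link */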
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->frame_cpu[framenum] = td;
	}
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
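		/* The TD isn't first in its frame's chain: just make the
		 * previous TD's link pointer bypass it */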
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
	wmb();
}

/*
 * Remove an URB's TDs from the hardware schedule
 */
static void uhci_remove_tds_from_schedule(struct uhci_hcd *uhci,
		struct urb *urb, int status)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Isochronous TDs get unlinked directly from the frame list */
	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		uhci_unlink_isochronous_tds(uhci, urb);
		return;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB. */
	if (urbp->node.prev != &urbp->qh->queue) {
		struct urb_priv *purbp;
		struct uhci_td *ptd, *ltd;

		if (status == -EINPROGRESS)
			status = 0;
		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		ltd = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = ltd->link;
	}

	/* If the URB completed with an error, then the QH element certainly
	 * points to one of the URB's TDs.  If it completed normally then
	 * the QH element has certainly moved on to the next URB.  And if
	 * the URB is still in progress then it must have been dequeued.
	 * The QH element either hasn't reached it yet or is somewhere in
	 * the middle.  If the URB wasn't first we can assume that it
	 * hasn't started yet (see above): Otherwise all the preceding URBs
	 * would have completed and been removed from the queue, so this one
	 * _would_ be first.
	 *
	 * If the QH element is inside this URB, clear it.  It will be
	 * set properly when the QH is activated.
	 */
	if (status < 0)
		urbp->qh->element = UHCI_PTR_TERM;
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;
		usb_get_dev(udev);

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->udev = NULL;
	}
	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		usb_put_dev(qh->udev);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = cpu_to_le32(td->dma_handle);
	}

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the end of the appropriate
	 * skeleton's list */
	list_move_tail(&qh->node, &qh->skel->node);

	/* Link it into the schedule */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp)
		return NULL;

	memset((void *)urbp, 0, sizeof(*urbp));

	urbp->urb = urb;
	urb->hcpriv = urbp;
	urbp->fsbrtime = jiffies;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);
	INIT_LIST_HEAD(&urbp->urb_list);

	return urbp;
}

static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urb(struct uhci_td *td)
{
	if (list_empty(&td->list))
		return;

	list_del_init(&td->list);
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->urb_list))
		dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list!\n",
				urbp->urb);
	if (!list_empty(&urbp->node))
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

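	/* When the controller is stopped the frame number no longer
	 * advances; adding in is_stopped makes the age test below fail,
	 * so the pending TDs get freed right away */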
	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
		uhci_free_pending_tds(uhci);
		uhci->td_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TDs. */
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urb(td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	urbp->urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}

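/*
 * FSBR (Full-Speed Bandwidth Reclamation): while it's enabled, the
 * terminating skeleton QH is linked back to the full-speed control queue,
 * so the controller keeps polling the control/bulk queues for the rest
 * of each frame instead of going idle.
 */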
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	}
}

static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)		/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {	/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)		/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)		/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)		/* Stalled */
		return -EPIPE;
	WARN_ON(status & TD_CTRL_ACTIVE);	/* Active */
	return 0;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct urb *urb)
{
	struct list_head *head;
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	int prevactive = 0;
	unsigned int toggle = 0;
	struct urb_priv *turbp, *list_end;

	/*
	 * We need to find out what the last successful toggle was so
	 * we can update the data toggles for the following transfers.
	 *
	 * There are 2 ways the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 *
	 * and a third way the first uncompleted TD is found:
	 *
	 * 3) The TD is active and the previous TD is NOT active
	 */
	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		unsigned int ctrlstat = td_status(td);

		if (!(ctrlstat & TD_CTRL_ACTIVE) &&
				(uhci_actual_length(ctrlstat) <
				 uhci_expected_length(td_token(td)) ||
				td->list.next == head))
			toggle = uhci_toggle(td_token(td)) ^ 1;
		else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
			toggle = uhci_toggle(td_token(td));

		prevactive = ctrlstat & TD_CTRL_ACTIVE;
	}

	/*
	 * Fix up the toggle for the following URBs in the queue.
	 *
	 * We can stop as soon as we find an URB with toggles set correctly,
	 * because then all the following URBs will be correct also.
	 */
	list_end = list_entry(&urbp->qh->queue, struct urb_priv, node);
	turbp = urbp;
	while ((turbp = list_entry(turbp->node.next, struct urb_priv, node))
			!= list_end) {
		td = list_entry(turbp->td_list.next, struct uhci_td, list);
		if (uhci_toggle(td_token(td)) == toggle)
			return;

		list_for_each_entry(td, &turbp->td_list, list) {
			td->token ^= __constant_cpu_to_le32(TD_TOKEN_TOGGLE);
			toggle ^= 1;
		}
	}

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
}

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		return -ENOMEM;

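	/* A SETUP packet is always exactly 8 bytes long */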
	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = min(len, maxsze);

		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;
		*plink = cpu_to_le32(td->dma_handle);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		return -ENOMEM;
	*plink = cpu_to_le32(td->dma_handle);

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		qh->skel = uhci->skel_ls_control_qh;
	else {
		qh->skel = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	return 0;
}

/*
 * If control-IN transfer was short, the status packet wasn't sent.
 * This routine changes the element pointer in the QH to point at the
 * status TD.  It's safe to do this even while the QH is live, because
 * the hardware only updates the element pointer following a successful
 * transfer.  The inactive TD for the short packet won't cause an update,
 * so the pointer won't get overwritten.  The next time the controller
 * sees this QH, it will send the status packet.
 */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;

	urbp->short_transfer = 1;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	urbp->qh->element = cpu_to_le32(td->dma_handle);

	return -EINPROGRESS;
}


static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	head = &urbp->td_list;
	if (urbp->short_transfer) {
		tmp = head->prev;
		goto status_stage;
	}

	urb->actual_length = 0;

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP stage: check its status, but don't */
	/* add its length to the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	/* The rest of the TDs (except the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		unsigned int ctrlstat;

		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->next;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			return usb_control_retrigger_status(uhci, urb);
		}
	}

status_stage:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status stage */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/* we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
	    status & TD_CTRL_ACTIVE &&
	    status & TD_CTRL_NAK)
		return 0;
#endif

	status = uhci_status_bits(status);
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink, fake_link;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = &fake_link;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;
		*plink = cpu_to_le32(td->dma_handle);

		uhci_add_td_to_urb(urb, td);
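		/* Each TD's token carries the endpoint's current data
		 * toggle */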
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
				(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		data += pktsze;
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;
		*plink = cpu_to_le32(td->dma_handle);

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(0) |
				(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
				data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe));
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	return 0;
}

/*
 * Common result for bulk and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	list_for_each_entry(td, &urbp->td_list, list) {
		unsigned int ctrlstat = td_status(td);

		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			/*
			 * This URB stopped short of its end.  We have to
			 * fix up the toggles of the following URBs on the
			 * queue and restart the queue.
			 *
			 * Do this only the first time we encounter the
			 * short URB.
			 */
			if (!urbp->short_transfer) {
				urbp->short_transfer = 1;
				uhci_fixup_toggles(urb);
				td = list_entry(urbp->td_list.prev,
						struct uhci_td, list);
				urbp->qh->element = td->link;
			}
			break;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendency to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console).
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}

static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	qh->skel = uhci->skel_bulk_qh;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_inc_fsbr(uhci, urb);
	return ret;
}

static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */
	qh->skel = uhci->skelqh[__interval_to_skel(urb->interval)];
	return uhci_submit_common(uhci, urb, qh);
}

/*
 * Isochronous transfers
 */
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
	struct urb *last_urb = NULL;
	struct urb_priv *up;
	int ret = 0;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		/* look for pending URBs with identical pipe handle */
		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
		    (u->status == -EINPROGRESS) && (u != urb)) {
			if (!last_urb)
				*start = u->start_frame;
			last_urb = u;
		}
	}

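	/* The frame just past the last queued packet is where a new
	 * URB can begin */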
	if (last_urb) {
		*end = (last_urb->start_frame + last_urb->number_of_packets *
				last_urb->interval) & (UHCI_NUMFRAMES-1);
		ret = 0;
	} else
		ret = -1;	/* no previous urb found */

	return ret;
}

static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{
	int limits;
	unsigned int start = 0, end = 0;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	limits = isochronous_find_limits(uhci, urb, &start, &end);

	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (limits) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = (uhci->frame_number + 10)
					& (UHCI_NUMFRAMES - 1);
		} else
			urb->start_frame = end;
	} else {
		urb->start_frame &= (UHCI_NUMFRAMES - 1);
		/* FIXME: Sanity check */
	}

	return 0;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, ret, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
				urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	qh->skel = uhci->skel_iso_qh;

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += urb->interval;
	}

	return 0;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = urb->error_count = 0;

	i = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		int actlength;
		unsigned int ctrlstat = td_status(td);

		if (ctrlstat & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(ctrlstat);
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(ctrlstat),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto done;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (hep->hcpriv)
		qh = (struct uhci_qh *) hep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, hep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case PIPE_INTERRUPT:
		if (list_empty(&qh->queue)) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, qh);
				if (ret == 0)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			struct urb_priv *eurbp;

			eurbp = list_entry(qh->queue.prev, struct urb_priv,
					node);
			urb->bandwidth = eurbp->urb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, qh);
		}
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb, qh);
		if (ret == 0)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	urbp->qh = qh;
	list_add_tail(&urbp->node, &qh->queue);
	list_add_tail(&urbp->urb_list, &uhci->urb_list);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away. */
	if (qh->queue.next == &urbp->node)
		uhci_activate_qh(uhci, qh);

	/* If the QH is already active, we have a race with the hardware.
	 * This won't get fixed until dummy TDs are added. */
	else if (qh->state == QH_STATE_ACTIVE) {

		/* If the URB isn't first on its queue, adjust the link pointer
		 * of the last TD in the previous URB. */
		if (urbp->node.prev != &urbp->qh->queue) {
			struct urb_priv *purbp = list_entry(urbp->node.prev,
					struct urb_priv, node);
			struct uhci_td *ptd = list_entry(purbp->td_list.prev,
					struct uhci_td, list);
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			ptd->link = cpu_to_le32(td->dma_handle);

		}
		if (qh_element(qh) == UHCI_PTR_TERM) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			qh->element = cpu_to_le32(td->dma_handle);
		}
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */

err_no_qh:
	uhci_free_urb_priv(uhci, urbp);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

/*
 * Return the result of a transfer
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int status;
	int okay_to_giveback = 0;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		status = uhci_result_control(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		status = uhci_result_isochronous(uhci, urb);
		break;
	default:	/* PIPE_BULK or PIPE_INTERRUPT */
		status = uhci_result_common(uhci, urb);
		break;
	}

	spin_lock(&urb->lock);
	if (urb->status == -EINPROGRESS) {	/* Not yet dequeued */
		if (status != -EINPROGRESS) {	/* URB has completed */
			urb->status = status;

			/* If the URB got a real error (as opposed to
			 * simply being dequeued), we don't have to
			 * unlink the QH.  Fix this later... */
			if (status < 0)
				uhci_unlink_qh(uhci, urbp->qh);
			else
				okay_to_giveback = 1;
		}
	} else {		/* Already dequeued */
		if (urbp->qh->state == QH_STATE_UNLINKING &&
				uhci->frame_number + uhci->is_stopped !=
				urbp->qh->unlink_frame)
			okay_to_giveback = 1;
	}
	spin_unlock(&urb->lock);
	if (!okay_to_giveback)
		return;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&urbp->qh->queue) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		/* Falls through */
	case PIPE_BULK:
		if (status < 0)
			uhci_fixup_toggles(urb);
		break;
	default:	/* PIPE_CONTROL */
		break;
	}

	/* Take the URB's TDs off the hardware schedule */
	uhci_remove_tds_from_schedule(uhci, urb, status);

	/* Take the URB off the QH's queue and see if the QH is now unused */
	list_del_init(&urbp->node);
	if (list_empty(&urbp->qh->queue))
		uhci_unlink_qh(uhci, urbp->qh);

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/* Queue it for giving back */
	list_move_tail(&urbp->urb_list, &uhci->complete_list);
}

/*
 * Check out the QHs waiting to be fully unlinked
 */
static void uhci_scan_unlinking_qhs(struct uhci_hcd *uhci)
{
	struct uhci_qh *qh, *tmp;

	list_for_each_entry_safe(qh, tmp, &uhci->skel_unlink_qh->node, node) {

		/* If the queue is empty and the QH is fully unlinked then
		 * it can become IDLE. */
		if (list_empty(&qh->queue)) {
			if (uhci->frame_number + uhci->is_stopped !=
					qh->unlink_frame)
				uhci_make_qh_idle(uhci, qh);

		/* If none of the QH's URBs have been dequeued then the QH
		 * should be re-activated. */
		} else {
			struct urb_priv *urbp;
			int any_dequeued = 0;

			list_for_each_entry(urbp, &qh->queue, node) {
				if (urbp->urb->status != -EINPROGRESS) {
					any_dequeued = 1;
					break;
				}
			}
			if (!any_dequeued)
				uhci_activate_qh(uhci, qh);
		}
	}
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
		uhci_unlink_isochronous_tds(uhci, urb);
	uhci_unlink_qh(uhci, urbp->qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}

static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head;
	struct uhci_td *td;
	int count = 0;

	uhci_dec_fsbr(uhci, urb);

	urbp->fsbr_timeout = 1;

	/*
	 * Ideally we would want to fix qh->element as well, but it's
	 * read/write by the HC, so that can introduce a race.  It's not
	 * really worth the hassle.
	 */

	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		/*
		 * Make sure we don't do the last one (since it'll have the
		 * TERM bit set), and that we skip every so many TDs so
		 * it doesn't hog the bandwidth
		 */
		if (td->list.next != head && (count % DEPTH_INTERVAL) ==
				(DEPTH_INTERVAL - 1))
			td->link |= UHCI_PTR_DEPTH;

		count++;
	}

	return 0;
}

static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
	struct uhci_td *td, *tmp;

	list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
		list_del_init(&td->remove_list);

		uhci_free_td(uhci, td);
	}
}

static void
uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci_free_urb_priv(uhci, (struct urb_priv *) (urb->hcpriv));

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(hcd, urb, regs);
	spin_lock(&uhci->lock);
}

static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	struct urb_priv *urbp, *tmp;

	list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
		struct urb *urb = urbp->urb;

		list_del_init(&urbp->urb_list);
		uhci_finish_urb(uhci_to_hcd(uhci), urb, regs);
	}
}

/* Process events in the schedule, but only in one thread at a time */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	struct urb_priv *urbp, *tmp;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);

	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);

	/* Walk the list of pending URBs to see which ones completed
	 * (must be _safe because uhci_transfer_result() dequeues URBs) */
	list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
		struct urb *urb = urbp->urb;

		/* Checks the status and does all of the magic necessary */
		uhci_transfer_result(uhci, urb);
	}
	uhci_finish_completion(uhci, regs);

	/* If the controller is stopped, we can finish these off right now */
	if (uhci->is_stopped)
		uhci_free_pending_tds(uhci);

	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	/* Check out the QHs waiting for unlinking */
	uhci_scan_unlinking_qhs(uhci);

	if (list_empty(&uhci->td_remove_list) &&
	    list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}

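/*
 * Check whether FSBR should be switched off: first for individual URBs
 * that have been waiting too long, then (once the last user is gone)
 * by unhooking the reclamation loop from the schedule altogether.
 */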
static void check_fsbr(struct uhci_hcd *uhci)
{
	struct urb_priv *up;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		spin_unlock(&u->lock);
	}

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}
}