Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
d49d4317 | 2 | * Copyright (C) 2001-2004 by David Brownell |
53bd6a60 | 3 | * |
1da177e4 LT | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License as published by the | |
6 | * Free Software Foundation; either version 2 of the License, or (at your | |
7 | * option) any later version. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, but | |
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
11 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 | * for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software Foundation, | |
16 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
17 | */ | |
18 | ||
19 | /* this file is part of ehci-hcd.c */ | |
20 | ||
21 | /*-------------------------------------------------------------------------*/ | |
22 | ||
23 | /* | |
24 | * EHCI hardware queue manipulation ... the core. QH/QTD manipulation. | |
25 | * | |
26 | * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd" | |
27 | * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned | |
28 | * buffers needed for the larger number). We use one QH per endpoint, queue | |
29 | * multiple urbs (all three types) per endpoint. URBs may need several qtds. | |
30 | * | |
31 | * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with | |
32 | * interrupts) needs careful scheduling. Performance improvements can be | |
33 | * an ongoing challenge. That's in "ehci-sched.c". | |
53bd6a60 | 34 | * |
1da177e4 LT | 35 | * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs, |
36 | * or otherwise through transaction translators (TTs) in USB 2.0 hubs using | |
37 | * (b) special fields in qh entries or (c) split iso entries. TTs will | |
38 | * buffer low/full speed data so the host collects it at high speed. | |
39 | */ | |
40 | ||
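/*
 * Rough picture of the data structures described above (an editorial
 * illustration, not a definition from this file):
 *
 *	ehci->async --> QH (endpoint A) --> QH (endpoint B) --> ...
 *	                  |                     (circular async ring)
 *	                  +-- qtd_list: qtd --> qtd --> ... --> dummy qtd
 *	                      (transactions for one or more queued URBs)
 *
 * Interrupt QHs hang off the periodic schedule instead, but carry the
 * same kind of qtd list.
 */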
41 | /*-------------------------------------------------------------------------*/ | |
42 | ||
43 | /* fill a qtd, returning how much of the buffer we were able to queue up */ | |
44 | ||
45 | static int | |
6dbd682b SR | 46 | qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf, |
47 | size_t len, int token, int maxpacket) | |
1da177e4 LT | 48 | { |
49 | int i, count; | |
50 | u64 addr = buf; | |
51 | ||
52 | /* one buffer entry per 4K ... first might be short or unaligned */ | |
6dbd682b SR | 53 | qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr); |
54 | qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32)); | |
1da177e4 LT | 55 | count = 0x1000 - (buf & 0x0fff); /* rest of that page */ |
56 | if (likely (len < count)) /* ... iff needed */ | |
57 | count = len; | |
58 | else { | |
59 | buf += 0x1000; | |
60 | buf &= ~0x0fff; | |
61 | ||
62 | /* per-qtd limit: from 16K to 20K (best alignment) */ | |
63 | for (i = 1; count < len && i < 5; i++) { | |
64 | addr = buf; | |
6dbd682b SR | 65 | qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr); |
66 | qtd->hw_buf_hi[i] = cpu_to_hc32(ehci, | |
67 | (u32)(addr >> 32)); | |
1da177e4 LT | 68 | buf += 0x1000; |
69 | if ((count + 0x1000) < len) | |
70 | count += 0x1000; | |
71 | else | |
72 | count = len; | |
73 | } | |
74 | ||
75 | /* short packets may only terminate transfers */ | |
76 | if (count != len) | |
77 | count -= (count % maxpacket); | |
78 | } | |
6dbd682b | 79 | qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token); |
1da177e4 LT | 80 | qtd->length = count; |
81 | ||
82 | return count; | |
83 | } | |
84 | ||
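/*
 * A hedged usage sketch of qtd_fill() (illustrative only; the function
 * name below is hypothetical, and the real callers are qh_urb_transaction()
 * and submit_single_step_set_feature() later in this file): a 20 KiB
 * transfer whose DMA buffer starts 0x200 bytes into a page, with a
 * 512-byte maxpacket.
 */
static void __maybe_unused example_qtd_fill(struct ehci_hcd *ehci,
		struct ehci_qtd *qtd, dma_addr_t buf)
{
	size_t len = 20 * 1024;		/* hypothetical transfer length */
	u32 token = QTD_STS_ACTIVE | (EHCI_TUNE_CERR << 10);
	int queued;

	queued = qtd_fill(ehci, qtd, buf + 0x200, len, token, 512);

	/*
	 * queued == 0xe00 + 4 * 0x1000 == 19968: the first buffer pointer
	 * covers the 0xe00-byte remainder of the first page, the other four
	 * cover one 4 KiB page each, and the count is already a multiple of
	 * maxpacket.  The remaining 512 bytes would go into the next qtd.
	 */
	(void)queued;
}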
85 | /*-------------------------------------------------------------------------*/ | |
86 | ||
87 | static inline void | |
88 | qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) | |
89 | { | |
3807e26d AD | 90 | struct ehci_qh_hw *hw = qh->hw; |
91 | ||
1da177e4 | 92 | /* writes to an active overlay are unsafe */ |
c1fdb68e | 93 | WARN_ON(qh->qh_state != QH_STATE_IDLE); |
1da177e4 | 94 | |
3807e26d AD | 95 | hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); |
96 | hw->hw_alt_next = EHCI_LIST_END(ehci); | |
1da177e4 | 97 | |
a455212d AS | 98 | /* Except for control endpoints, we make hardware maintain data |
99 | * toggle (like OHCI) ... here (re)initialize the toggle in the QH, | |
100 | * and set the pseudo-toggle in udev. Only usb_clear_halt() will | |
101 | * ever clear it. | |
102 | */ | |
4c53de72 | 103 | if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) { |
a455212d AS | 104 | unsigned is_out, epnum; |
105 | ||
e04f5f7e | 106 | is_out = qh->is_out; |
3807e26d | 107 | epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f; |
ffa0248e | 108 | if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) { |
3807e26d | 109 | hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); |
ffa0248e | 110 | usb_settoggle(qh->ps.udev, epnum, is_out, 1); |
a455212d AS | 111 | } |
112 | } | |
113 | ||
3807e26d | 114 | hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); |
1da177e4 LT | 115 | } |
116 | ||
117 | /* if it weren't for a common silicon quirk (writing the dummy into the qh | |
118 | * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault | |
119 | * recovery (including urb dequeue) would need software changes to a QH... | |
120 | */ | |
121 | static void | |
122 | qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) | |
123 | { | |
124 | struct ehci_qtd *qtd; | |
125 | ||
c1fdb68e | 126 | qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list); |
1da177e4 | 127 | |
c1fdb68e AS | 128 | /* |
129 | * first qtd may already be partially processed. | |
130 | * If we come here during unlink, the QH overlay region | |
131 | * might have reference to the just unlinked qtd. The | |
132 | * qtd is updated in qh_completions(). Update the QH | |
133 | * overlay here. | |
134 | */ | |
135 | if (qh->hw->hw_token & ACTIVE_BIT(ehci)) | |
136 | qh->hw->hw_qtd_next = qtd->hw_next; | |
137 | else | |
138 | qh_update(ehci, qh, qtd); | |
1da177e4 LT | 139 | } |
140 | ||
141 | /*-------------------------------------------------------------------------*/ | |
142 | ||
914b7012 AS | 143 | static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh); |
144 | ||
145 | static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd, | |
146 | struct usb_host_endpoint *ep) | |
147 | { | |
148 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | |
149 | struct ehci_qh *qh = ep->hcpriv; | |
150 | unsigned long flags; | |
151 | ||
152 | spin_lock_irqsave(&ehci->lock, flags); | |
153 | qh->clearing_tt = 0; | |
154 | if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list) | |
e8799906 | 155 | && ehci->rh_state == EHCI_RH_RUNNING) |
914b7012 AS |
156 | qh_link_async(ehci, qh); |
157 | spin_unlock_irqrestore(&ehci->lock, flags); | |
158 | } | |
159 | ||
160 | static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh, | |
161 | struct urb *urb, u32 token) | |
162 | { | |
163 | ||
164 | /* If an async split transaction gets an error or is unlinked, | |
165 | * the TT buffer may be left in an indeterminate state. We | |
166 | * have to clear the TT buffer. | |
167 | * | |
168 | * Note: this routine is never called for Isochronous transfers. | |
169 | */ | |
170 | if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) { | |
1c20163d | 171 | #ifdef CONFIG_DYNAMIC_DEBUG |
914b7012 AS |
172 | struct usb_device *tt = urb->dev->tt->hub; |
173 | dev_dbg(&tt->dev, | |
174 | "clear tt buffer port %d, a%d ep%d t%08x\n", | |
175 | urb->dev->ttport, urb->dev->devnum, | |
176 | usb_pipeendpoint(urb->pipe), token); | |
1c20163d | 177 | #endif /* CONFIG_DYNAMIC_DEBUG */ |
914b7012 AS |
178 | if (!ehci_is_TDI(ehci) |
179 | || urb->dev->tt->hub != | |
180 | ehci_to_hcd(ehci)->self.root_hub) { | |
181 | if (usb_hub_clear_tt_buffer(urb) == 0) | |
182 | qh->clearing_tt = 1; | |
183 | } else { | |
184 | ||
185 | /* REVISIT ARC-derived cores don't clear the root | |
186 | * hub TT buffer in this way... | |
187 | */ | |
188 | } | |
189 | } | |
190 | } | |
191 | ||
14c04c0f | 192 | static int qtd_copy_status ( |
1da177e4 LT |
193 | struct ehci_hcd *ehci, |
194 | struct urb *urb, | |
195 | size_t length, | |
196 | u32 token | |
197 | ) | |
198 | { | |
14c04c0f AS |
199 | int status = -EINPROGRESS; |
200 | ||
1da177e4 LT |
201 | /* count IN/OUT bytes, not SETUP (even short packets) */ |
202 | if (likely (QTD_PID (token) != 2)) | |
203 | urb->actual_length += length - QTD_LENGTH (token); | |
204 | ||
205 | /* don't modify error codes */ | |
eb231054 | 206 | if (unlikely(urb->unlinked)) |
14c04c0f | 207 | return status; |
1da177e4 LT |
208 | |
209 | /* force cleanup after short read; not always an error */ | |
210 | if (unlikely (IS_SHORT_READ (token))) | |
14c04c0f | 211 | status = -EREMOTEIO; |
1da177e4 LT |
212 | |
213 | /* serious "can't proceed" faults reported by the hardware */ | |
214 | if (token & QTD_STS_HALT) { | |
215 | if (token & QTD_STS_BABBLE) { | |
216 | /* FIXME "must" disable babbling device's port too */ | |
14c04c0f | 217 | status = -EOVERFLOW; |
ba516de3 AS |
218 | /* CERR nonzero + halt --> stall */ |
219 | } else if (QTD_CERR(token)) { | |
220 | status = -EPIPE; | |
221 | ||
222 | /* In theory, more than one of the following bits can be set | |
223 | * since they are sticky and the transaction is retried. | |
224 | * Which to test first is rather arbitrary. | |
225 | */ | |
1da177e4 LT |
226 | } else if (token & QTD_STS_MMF) { |
227 | /* fs/ls interrupt xfer missed the complete-split */ | |
14c04c0f | 228 | status = -EPROTO; |
1da177e4 | 229 | } else if (token & QTD_STS_DBE) { |
14c04c0f | 230 | status = (QTD_PID (token) == 1) /* IN ? */ |
1da177e4 LT |
231 | ? -ENOSR /* hc couldn't read data */ |
232 | : -ECOMM; /* hc couldn't write data */ | |
233 | } else if (token & QTD_STS_XACT) { | |
ba516de3 AS |
234 | /* timeout, bad CRC, wrong PID, etc */ |
235 | ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n", | |
236 | urb->dev->devpath, | |
237 | usb_pipeendpoint(urb->pipe), | |
238 | usb_pipein(urb->pipe) ? "in" : "out"); | |
14c04c0f | 239 | status = -EPROTO; |
ba516de3 AS |
240 | } else { /* unknown */ |
241 | status = -EPROTO; | |
242 | } | |
1da177e4 | 243 | } |
14c04c0f AS |
244 | |
245 | return status; | |
1da177e4 LT |
246 | } |
247 | ||
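/*
 * Editorial summary of the error mapping above (informational only):
 *
 *	halted + babble			-EOVERFLOW
 *	halted + CERR nonzero		-EPIPE (endpoint stall)
 *	halted + missed microframe	-EPROTO
 *	halted + data buffer error	-ENOSR (IN) / -ECOMM (OUT)
 *	halted + transaction error	-EPROTO
 *	halted, cause unknown		-EPROTO
 *	short read, not halted		-EREMOTEIO (the caller may later
 *					 treat this as a benign short read)
 *	unlinked urb			-EINPROGRESS; existing error codes
 *					 are never modified
 */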
248 | static void | |
14c04c0f | 249 | ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status) |
1da177e4 | 250 | { |
2656a9ab AS |
251 | if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { |
252 | /* ... update hc-wide periodic stats */ | |
253 | ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; | |
1da177e4 LT |
254 | } |
255 | ||
eb231054 AS |
256 | if (unlikely(urb->unlinked)) { |
257 | COUNT(ehci->stats.unlink); | |
258 | } else { | |
4f667627 DB |
259 | /* report non-error and short read status as zero */ |
260 | if (status == -EINPROGRESS || status == -EREMOTEIO) | |
14c04c0f | 261 | status = 0; |
eb231054 | 262 | COUNT(ehci->stats.complete); |
1da177e4 | 263 | } |
1da177e4 LT |
264 | |
265 | #ifdef EHCI_URB_TRACE | |
266 | ehci_dbg (ehci, | |
267 | "%s %s urb %p ep%d%s status %d len %d/%d\n", | |
441b62c1 | 268 | __func__, urb->dev->devpath, urb, |
1da177e4 LT |
269 | usb_pipeendpoint (urb->pipe), |
270 | usb_pipein (urb->pipe) ? "in" : "out", | |
14c04c0f | 271 | status, |
1da177e4 LT |
272 | urb->actual_length, urb->transfer_buffer_length); |
273 | #endif | |
274 | ||
e9df41c5 | 275 | usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); |
4a00027d | 276 | usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status); |
1da177e4 LT |
277 | } |
278 | ||
1da177e4 LT |
279 | static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); |
280 | ||
281 | /* | |
282 | * Process and free completed qtds for a qh, returning URBs to drivers. | |
79bcf7b0 AS |
283 | * Chases up to qh->hw_current. Returns nonzero if the caller should |
284 | * unlink qh. | |
1da177e4 | 285 | */ |
1da177e4 | 286 | static unsigned |
7d12e780 | 287 | qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) |
1da177e4 | 288 | { |
3a44494e | 289 | struct ehci_qtd *last, *end = qh->dummy; |
1da177e4 | 290 | struct list_head *entry, *tmp; |
3a44494e | 291 | int last_status; |
1da177e4 | 292 | int stopped; |
1da177e4 | 293 | u8 state; |
3807e26d | 294 | struct ehci_qh_hw *hw = qh->hw; |
1da177e4 | 295 | |
1da177e4 LT |
296 | /* completions (or tasks on other cpus) must never clobber HALT |
297 | * till we've gone through and cleaned everything up, even when | |
298 | * they add urbs to this qh's queue or mark them for unlinking. | |
299 | * | |
300 | * NOTE: unlinking expects to be done in queue order. | |
3a44494e AS |
301 | * |
302 | * It's a bug for qh->qh_state to be anything other than | |
303 | * QH_STATE_IDLE, unless our caller is scan_async() or | |
569b394f | 304 | * scan_intr(). |
1da177e4 LT |
305 | */ |
306 | state = qh->qh_state; | |
307 | qh->qh_state = QH_STATE_COMPLETING; | |
308 | stopped = (state == QH_STATE_IDLE); | |
309 | ||
3a44494e AS |
310 | rescan: |
311 | last = NULL; | |
312 | last_status = -EINPROGRESS; | |
7bc782d7 | 313 | qh->dequeue_during_giveback = 0; |
3a44494e | 314 | |
1da177e4 LT |
315 | /* remove de-activated QTDs from front of queue. |
316 | * after faults (including short reads), cleanup this urb | |
317 | * then let the queue advance. | |
318 | * if queue is stopped, handles unlinks. | |
319 | */ | |
320 | list_for_each_safe (entry, tmp, &qh->qtd_list) { | |
321 | struct ehci_qtd *qtd; | |
322 | struct urb *urb; | |
323 | u32 token = 0; | |
324 | ||
325 | qtd = list_entry (entry, struct ehci_qtd, qtd_list); | |
326 | urb = qtd->urb; | |
327 | ||
328 | /* clean up any state from previous QTD ...*/ | |
329 | if (last) { | |
330 | if (likely (last->urb != urb)) { | |
14c04c0f | 331 | ehci_urb_done(ehci, last->urb, last_status); |
b5f7a0ec | 332 | last_status = -EINPROGRESS; |
1da177e4 LT |
333 | } |
334 | ehci_qtd_free (ehci, last); | |
335 | last = NULL; | |
336 | } | |
337 | ||
338 | /* ignore urbs submitted during completions we reported */ | |
339 | if (qtd == end) | |
340 | break; | |
341 | ||
342 | /* hardware copies qtd out of qh overlay */ | |
343 | rmb (); | |
6dbd682b | 344 | token = hc32_to_cpu(ehci, qtd->hw_token); |
1da177e4 LT |
345 | |
346 | /* always clean up qtds the hc de-activated */ | |
a2c2706e | 347 | retry_xacterr: |
1da177e4 LT |
348 | if ((token & QTD_STS_ACTIVE) == 0) { |
349 | ||
332960bd VP |
350 | /* Report Data Buffer Error: non-fatal but useful */ |
351 | if (token & QTD_STS_DBE) | |
352 | ehci_dbg(ehci, | |
353 | "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n", | |
354 | urb, | |
355 | usb_endpoint_num(&urb->ep->desc), | |
356 | usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out", | |
357 | urb->transfer_buffer_length, | |
358 | qtd, | |
359 | qh); | |
360 | ||
a082b5c7 DB |
361 | /* on STALL, error, and short reads this urb must |
362 | * complete and all its qtds must be recycled. | |
363 | */ | |
1da177e4 | 364 | if ((token & QTD_STS_HALT) != 0) { |
a2c2706e AS |
365 | |
366 | /* retry transaction errors until we | |
367 | * reach the software xacterr limit | |
368 | */ | |
369 | if ((token & QTD_STS_XACT) && | |
370 | QTD_CERR(token) == 0 && | |
ef4638f9 | 371 | ++qh->xacterrs < QH_XACTERR_MAX && |
a2c2706e AS |
372 | !urb->unlinked) { |
373 | ehci_dbg(ehci, | |
d0626808 | 374 | "detected XactErr len %zu/%zu retry %d\n", |
ef4638f9 | 375 | qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs); |
a2c2706e AS |
376 | |
377 | /* reset the token in the qtd and the | |
378 | * qh overlay (which still contains | |
379 | * the qtd) so that we pick up from | |
380 | * where we left off | |
381 | */ | |
382 | token &= ~QTD_STS_HALT; | |
383 | token |= QTD_STS_ACTIVE | | |
384 | (EHCI_TUNE_CERR << 10); | |
385 | qtd->hw_token = cpu_to_hc32(ehci, | |
386 | token); | |
387 | wmb(); | |
3807e26d AD |
388 | hw->hw_token = cpu_to_hc32(ehci, |
389 | token); | |
a2c2706e AS |
390 | goto retry_xacterr; |
391 | } | |
1da177e4 LT |
392 | stopped = 1; |
393 | ||
394 | /* magic dummy for some short reads; qh won't advance. | |
395 | * that silicon quirk can kick in with this dummy too. | |
a082b5c7 DB |
396 | * |
397 | * other short reads won't stop the queue, including | |
398 | * control transfers (status stage handles that) or | |
399 | * most other single-qtd reads ... the queue stops if | |
400 | * URB_SHORT_NOT_OK was set so the driver submitting | |
401 | * the urbs could clean it up. | |
1da177e4 LT |
402 | */ |
403 | } else if (IS_SHORT_READ (token) | |
6dbd682b SR |
404 | && !(qtd->hw_alt_next |
405 | & EHCI_LIST_END(ehci))) { | |
1da177e4 | 406 | stopped = 1; |
1da177e4 LT |
407 | } |
408 | ||
409 | /* stop scanning when we reach qtds the hc is using */ | |
410 | } else if (likely (!stopped | |
c0c53dbc | 411 | && ehci->rh_state >= EHCI_RH_RUNNING)) { |
1da177e4 LT |
412 | break; |
413 | ||
a082b5c7 | 414 | /* scan the whole queue for unlinks whenever it stops */ |
1da177e4 LT |
415 | } else { |
416 | stopped = 1; | |
417 | ||
a082b5c7 | 418 | /* cancel everything if we halt, suspend, etc */ |
c0c53dbc | 419 | if (ehci->rh_state < EHCI_RH_RUNNING) |
14c04c0f | 420 | last_status = -ESHUTDOWN; |
1da177e4 | 421 | |
a082b5c7 DB |
422 | /* this qtd is active; skip it unless a previous qtd |
423 | * for its urb faulted, or its urb was canceled. | |
1da177e4 | 424 | */ |
a082b5c7 | 425 | else if (last_status == -EINPROGRESS && !urb->unlinked) |
1da177e4 | 426 | continue; |
53bd6a60 | 427 | |
feca7746 AS |
428 | /* |
429 | * If this was the active qtd when the qh was unlinked | |
430 | * and the overlay's token is active, then the overlay | |
431 | * hasn't been written back to the qtd yet so use its | |
432 | * token instead of the qtd's. After the qtd is | |
433 | * processed and removed, the overlay won't be valid | |
434 | * any more. | |
435 | */ | |
436 | if (state == QH_STATE_IDLE && | |
437 | qh->qtd_list.next == &qtd->qtd_list && | |
438 | (hw->hw_token & ACTIVE_BIT(ehci))) { | |
3807e26d | 439 | token = hc32_to_cpu(ehci, hw->hw_token); |
feca7746 | 440 | hw->hw_token &= ~ACTIVE_BIT(ehci); |
1da177e4 | 441 | |
914b7012 AS |
442 | /* An unlink may leave an incomplete |
443 | * async transaction in the TT buffer. | |
444 | * We have to clear it. | |
445 | */ | |
446 | ehci_clear_tt_buffer(ehci, qh, urb, token); | |
447 | } | |
1da177e4 | 448 | } |
53bd6a60 | 449 | |
4f667627 DB |
450 | /* unless we already know the urb's status, collect qtd status |
451 | * and update count of bytes transferred. in common short read | |
452 | * cases with only one data qtd (including control transfers), | |
453 | * queue processing won't halt. but with two or more qtds (for | |
454 | * example, with a 32 KB transfer), when the first qtd gets a | |
455 | * short read the second must be removed by hand. | |
456 | */ | |
457 | if (last_status == -EINPROGRESS) { | |
458 | last_status = qtd_copy_status(ehci, urb, | |
459 | qtd->length, token); | |
460 | if (last_status == -EREMOTEIO | |
461 | && (qtd->hw_alt_next | |
462 | & EHCI_LIST_END(ehci))) | |
463 | last_status = -EINPROGRESS; | |
914b7012 AS |
464 | |
465 | /* As part of low/full-speed endpoint-halt processing | |
466 | * we must clear the TT buffer (11.17.5). | |
467 | */ | |
468 | if (unlikely(last_status != -EINPROGRESS && | |
c2f6595f AS |
469 | last_status != -EREMOTEIO)) { |
470 | /* The TT's in some hubs malfunction when they | |
471 | * receive this request following a STALL (they | |
472 | * stop sending isochronous packets). Since a | |
473 | * STALL can't leave the TT buffer in a busy | |
474 | * state (if you believe Figures 11-48 - 11-51 | |
475 | * in the USB 2.0 spec), we won't clear the TT | |
476 | * buffer in this case. Strictly speaking this | |
477 | * is a violation of the spec. | |
478 | */ | |
479 | if (last_status != -EPIPE) | |
480 | ehci_clear_tt_buffer(ehci, qh, urb, | |
481 | token); | |
482 | } | |
b0d9efba | 483 | } |
1da177e4 | 484 | |
a082b5c7 DB |
485 | /* if we're removing something not at the queue head, |
486 | * patch the hardware queue pointer. | |
487 | */ | |
1da177e4 LT |
488 | if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { |
489 | last = list_entry (qtd->qtd_list.prev, | |
490 | struct ehci_qtd, qtd_list); | |
491 | last->hw_next = qtd->hw_next; | |
492 | } | |
a082b5c7 DB |
493 | |
494 | /* remove qtd; it's recycled after possible urb completion */ | |
1da177e4 LT |
495 | list_del (&qtd->qtd_list); |
496 | last = qtd; | |
a2c2706e AS |
497 | |
498 | /* reinit the xacterr counter for the next qtd */ | |
ef4638f9 | 499 | qh->xacterrs = 0; |
1da177e4 LT |
500 | } |
501 | ||
502 | /* last urb's completion might still need calling */ | |
503 | if (likely (last != NULL)) { | |
14c04c0f | 504 | ehci_urb_done(ehci, last->urb, last_status); |
1da177e4 LT |
505 | ehci_qtd_free (ehci, last); |
506 | } | |
507 | ||
3a44494e | 508 | /* Do we need to rescan for URBs dequeued during a giveback? */ |
7bc782d7 | 509 | if (unlikely(qh->dequeue_during_giveback)) { |
3a44494e AS |
510 | /* If the QH is already unlinked, do the rescan now. */ |
511 | if (state == QH_STATE_IDLE) | |
512 | goto rescan; | |
513 | ||
7bc782d7 | 514 | /* Otherwise the caller must unlink the QH. */ |
3a44494e AS |
515 | } |
516 | ||
1da177e4 LT |
517 | /* restore original state; caller must unlink or relink */ |
518 | qh->qh_state = state; | |
519 | ||
520 | /* be sure the hardware's done with the qh before refreshing | |
521 | * it after fault cleanup, or recovering from silicon wrongly | |
522 | * overlaying the dummy qtd (which reduces DMA chatter). | |
7bc782d7 AS |
523 | * |
524 | * We won't refresh a QH that's linked (after the HC | |
525 | * stopped the queue). That avoids a race: | |
526 | * - HC reads first part of QH; | |
527 | * - CPU updates that first part and the token; | |
528 | * - HC reads rest of that QH, including token | |
529 | * Result: HC gets an inconsistent image, and then | |
530 | * DMAs to/from the wrong memory (corrupting it). | |
531 | * | |
532 | * That should be rare for interrupt transfers, | |
533 | * except maybe high bandwidth ... | |
1da177e4 | 534 | */ |
7bc782d7 AS |
535 | if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) |
536 | qh->exception = 1; | |
1da177e4 | 537 | |
7bc782d7 AS |
538 | /* Let the caller know if the QH needs to be unlinked. */ |
539 | return qh->exception; | |
1da177e4 LT |
540 | } |
541 | ||
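/*
 * Illustrative caller-side pattern (see scan_async() and the unlink code
 * below for the real uses): a nonzero return from qh_completions() means
 * the QH has taken an exception and should be unlinked, e.g.
 *
 *	if (!list_empty(&qh->qtd_list) && qh_completions(ehci, qh))
 *		start_unlink_async(ehci, qh);
 */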
542 | /*-------------------------------------------------------------------------*/ | |
543 | ||
544 | // high bandwidth multiplier, as encoded in highspeed endpoint descriptors | |
545 | #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) | |
546 | // ... and packet size, for any kind of endpoint descriptor | |
547 | #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) | |
548 | ||
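/*
 * Worked example (editorial, values hypothetical): a high-bandwidth
 * interrupt endpoint advertising wMaxPacketSize 0x1400 encodes "two
 * additional transactions per microframe" in bits 12:11 and a 1024-byte
 * packet in bits 10:0, so hb_mult(0x1400) == 3 and
 * max_packet(0x1400) == 1024, i.e. up to 3 * 1024 bytes per microframe.
 */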
549 | /* | |
550 | * reverse of qh_urb_transaction: free a list of TDs. | |
551 | * used for cleanup after errors, before HC sees an URB's TDs. | |
552 | */ | |
553 | static void qtd_list_free ( | |
554 | struct ehci_hcd *ehci, | |
555 | struct urb *urb, | |
556 | struct list_head *qtd_list | |
557 | ) { | |
558 | struct list_head *entry, *temp; | |
559 | ||
560 | list_for_each_safe (entry, temp, qtd_list) { | |
561 | struct ehci_qtd *qtd; | |
562 | ||
563 | qtd = list_entry (entry, struct ehci_qtd, qtd_list); | |
564 | list_del (&qtd->qtd_list); | |
565 | ehci_qtd_free (ehci, qtd); | |
566 | } | |
567 | } | |
568 | ||
569 | /* | |
570 | * create a list of filled qtds for this URB; won't link into qh. | |
571 | */ | |
572 | static struct list_head * | |
573 | qh_urb_transaction ( | |
574 | struct ehci_hcd *ehci, | |
575 | struct urb *urb, | |
576 | struct list_head *head, | |
55016f10 | 577 | gfp_t flags |
1da177e4 LT |
578 | ) { |
579 | struct ehci_qtd *qtd, *qtd_prev; | |
580 | dma_addr_t buf; | |
40f8db8f | 581 | int len, this_sg_len, maxpacket; |
1da177e4 LT |
582 | int is_input; |
583 | u32 token; | |
40f8db8f AS |
584 | int i; |
585 | struct scatterlist *sg; | |
1da177e4 LT |
586 | |
587 | /* | |
588 | * URBs map to sequences of QTDs: one logical transaction | |
589 | */ | |
590 | qtd = ehci_qtd_alloc (ehci, flags); | |
591 | if (unlikely (!qtd)) | |
592 | return NULL; | |
593 | list_add_tail (&qtd->qtd_list, head); | |
594 | qtd->urb = urb; | |
595 | ||
596 | token = QTD_STS_ACTIVE; | |
597 | token |= (EHCI_TUNE_CERR << 10); | |
598 | /* for split transactions, SplitXState initialized to zero */ | |
599 | ||
600 | len = urb->transfer_buffer_length; | |
601 | is_input = usb_pipein (urb->pipe); | |
602 | if (usb_pipecontrol (urb->pipe)) { | |
603 | /* SETUP pid */ | |
6dbd682b SR |
604 | qtd_fill(ehci, qtd, urb->setup_dma, |
605 | sizeof (struct usb_ctrlrequest), | |
606 | token | (2 /* "setup" */ << 8), 8); | |
1da177e4 LT |
607 | |
608 | /* ... and always at least one more pid */ | |
609 | token ^= QTD_TOGGLE; | |
610 | qtd_prev = qtd; | |
611 | qtd = ehci_qtd_alloc (ehci, flags); | |
612 | if (unlikely (!qtd)) | |
613 | goto cleanup; | |
614 | qtd->urb = urb; | |
6dbd682b | 615 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); |
1da177e4 | 616 | list_add_tail (&qtd->qtd_list, head); |
6912354a AS |
617 | |
618 | /* for zero length DATA stages, STATUS is always IN */ | |
619 | if (len == 0) | |
620 | token |= (1 /* "in" */ << 8); | |
53bd6a60 | 621 | } |
1da177e4 LT |
622 | |
623 | /* | |
624 | * data transfer stage: buffer setup | |
625 | */ | |
bc677d5b | 626 | i = urb->num_mapped_sgs; |
40f8db8f | 627 | if (len > 0 && i > 0) { |
910f8d0c | 628 | sg = urb->sg; |
40f8db8f AS |
629 | buf = sg_dma_address(sg); |
630 | ||
631 | /* urb->transfer_buffer_length may be smaller than the | |
632 | * size of the scatterlist (or vice versa) | |
633 | */ | |
634 | this_sg_len = min_t(int, sg_dma_len(sg), len); | |
635 | } else { | |
636 | sg = NULL; | |
637 | buf = urb->transfer_dma; | |
638 | this_sg_len = len; | |
639 | } | |
1da177e4 | 640 | |
6912354a | 641 | if (is_input) |
1da177e4 LT |
642 | token |= (1 /* "in" */ << 8); |
643 | /* else it's already initted to "out" pid (0 << 8) */ | |
644 | ||
645 | maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); | |
646 | ||
647 | /* | |
648 | * buffer gets wrapped in one or more qtds; | |
649 | * last one may be "short" (including zero len) | |
650 | * and may serve as a control status ack | |
651 | */ | |
652 | for (;;) { | |
653 | int this_qtd_len; | |
654 | ||
40f8db8f AS |
655 | this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token, |
656 | maxpacket); | |
657 | this_sg_len -= this_qtd_len; | |
1da177e4 LT |
658 | len -= this_qtd_len; |
659 | buf += this_qtd_len; | |
a082b5c7 DB |
660 | |
661 | /* | |
662 | * short reads advance to a "magic" dummy instead of the next | |
663 | * qtd ... that forces the queue to stop, for manual cleanup. | |
664 | * (this will usually be overridden later.) | |
665 | */ | |
1da177e4 | 666 | if (is_input) |
3807e26d | 667 | qtd->hw_alt_next = ehci->async->hw->hw_alt_next; |
1da177e4 LT |
668 | |
669 | /* qh makes control packets use qtd toggle; maybe switch it */ | |
670 | if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) | |
671 | token ^= QTD_TOGGLE; | |
672 | ||
40f8db8f AS |
673 | if (likely(this_sg_len <= 0)) { |
674 | if (--i <= 0 || len <= 0) | |
675 | break; | |
676 | sg = sg_next(sg); | |
677 | buf = sg_dma_address(sg); | |
678 | this_sg_len = min_t(int, sg_dma_len(sg), len); | |
679 | } | |
1da177e4 LT |
680 | |
681 | qtd_prev = qtd; | |
682 | qtd = ehci_qtd_alloc (ehci, flags); | |
683 | if (unlikely (!qtd)) | |
684 | goto cleanup; | |
685 | qtd->urb = urb; | |
6dbd682b | 686 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); |
1da177e4 LT |
687 | list_add_tail (&qtd->qtd_list, head); |
688 | } | |
689 | ||
a082b5c7 DB |
690 | /* |
691 | * unless the caller requires manual cleanup after short reads, | |
692 | * have the alt_next mechanism keep the queue running after the | |
693 | * last data qtd (the only one, for control and most other cases). | |
1da177e4 LT |
694 | */ |
695 | if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 | |
696 | || usb_pipecontrol (urb->pipe))) | |
6dbd682b | 697 | qtd->hw_alt_next = EHCI_LIST_END(ehci); |
1da177e4 LT |
698 | |
699 | /* | |
700 | * control requests may need a terminating data "status" ack; | |
9a971dda ML |
701 | * other OUT ones may need a terminating short packet |
702 | * (zero length). | |
1da177e4 | 703 | */ |
6912354a | 704 | if (likely (urb->transfer_buffer_length != 0)) { |
1da177e4 LT |
705 | int one_more = 0; |
706 | ||
707 | if (usb_pipecontrol (urb->pipe)) { | |
708 | one_more = 1; | |
709 | token ^= 0x0100; /* "in" <--> "out" */ | |
710 | token |= QTD_TOGGLE; /* force DATA1 */ | |
9a971dda | 711 | } else if (usb_pipeout(urb->pipe) |
1da177e4 LT |
712 | && (urb->transfer_flags & URB_ZERO_PACKET) |
713 | && !(urb->transfer_buffer_length % maxpacket)) { | |
714 | one_more = 1; | |
715 | } | |
716 | if (one_more) { | |
717 | qtd_prev = qtd; | |
718 | qtd = ehci_qtd_alloc (ehci, flags); | |
719 | if (unlikely (!qtd)) | |
720 | goto cleanup; | |
721 | qtd->urb = urb; | |
6dbd682b | 722 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); |
1da177e4 LT |
723 | list_add_tail (&qtd->qtd_list, head); |
724 | ||
725 | /* never any data in such packets */ | |
6dbd682b | 726 | qtd_fill(ehci, qtd, 0, 0, token, 0); |
1da177e4 LT |
727 | } |
728 | } | |
729 | ||
730 | /* by default, enable interrupt on urb completion */ | |
731 | if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT))) | |
6dbd682b | 732 | qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); |
1da177e4 LT |
733 | return head; |
734 | ||
735 | cleanup: | |
736 | qtd_list_free (ehci, urb, head); | |
737 | return NULL; | |
738 | } | |
739 | ||
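/*
 * A hedged usage sketch (the function name below is hypothetical; the real
 * caller is ehci_urb_enqueue() in ehci-hcd.c): build the qtd chain for an
 * URB and release it if submission cannot proceed.  A control URB becomes
 * SETUP qtd -> data qtd(s) -> zero-length status qtd, linked through
 * hw_next.
 */
static int __maybe_unused example_build_qtds(struct ehci_hcd *ehci,
		struct urb *urb)
{
	struct list_head qtd_list;

	INIT_LIST_HEAD(&qtd_list);
	if (!qh_urb_transaction(ehci, urb, &qtd_list, GFP_ATOMIC))
		return -ENOMEM;

	/* normally the list is handed to submit_async() below (or to the
	 * interrupt-submit path in ehci-sched.c); here we just free it
	 */
	qtd_list_free(ehci, urb, &qtd_list);
	return 0;
}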
740 | /*-------------------------------------------------------------------------*/ | |
741 | ||
742 | // Would be best to create all qh's from config descriptors, | |
743 | // when each interface/altsetting is established. Unlink | |
744 | // any previous qh and cancel its urbs first; endpoints are | |
745 | // implicitly reset then (data toggle too). | |
746 | // That'd mean updating how usbcore talks to HCDs. (2.7?) | |
747 | ||
748 | ||
749 | /* | |
750 | * Each QH holds a qtd list; a QH is used for everything except iso. | |
751 | * | |
752 | * For interrupt urbs, the scheduler must set the microframe scheduling | |
753 | * mask(s) each time the QH gets scheduled. For highspeed, that's | |
754 | * just one microframe in the s-mask. For split interrupt transactions | |
755 | * there are additional complications: c-mask, maybe FSTNs. | |
756 | */ | |
757 | static struct ehci_qh * | |
758 | qh_make ( | |
759 | struct ehci_hcd *ehci, | |
760 | struct urb *urb, | |
55016f10 | 761 | gfp_t flags |
1da177e4 LT |
762 | ) { |
763 | struct ehci_qh *qh = ehci_qh_alloc (ehci, flags); | |
764 | u32 info1 = 0, info2 = 0; | |
765 | int is_input, type; | |
766 | int maxp = 0; | |
340ba5f9 | 767 | struct usb_tt *tt = urb->dev->tt; |
3807e26d | 768 | struct ehci_qh_hw *hw; |
1da177e4 LT |
769 | |
770 | if (!qh) | |
771 | return qh; | |
772 | ||
773 | /* | |
774 | * init endpoint/device data for this QH | |
775 | */ | |
776 | info1 |= usb_pipeendpoint (urb->pipe) << 8; | |
777 | info1 |= usb_pipedevice (urb->pipe) << 0; | |
778 | ||
779 | is_input = usb_pipein (urb->pipe); | |
780 | type = usb_pipetype (urb->pipe); | |
781 | maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input); | |
782 | ||
caa9ef67 DB |
783 | /* 1024 byte maxpacket is a hardware ceiling. High bandwidth |
784 | * acts like up to 3KB, but is built from smaller packets. | |
785 | */ | |
786 | if (max_packet(maxp) > 1024) { | |
787 | ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp)); | |
788 | goto done; | |
789 | } | |
790 | ||
1da177e4 LT |
791 | /* Compute interrupt scheduling parameters just once, and save. |
792 | * - allowing for high bandwidth, how many nsec/uframe are used? | |
793 | * - split transactions need a second CSPLIT uframe; same question | |
794 | * - splits also need a schedule gap (for full/low speed I/O) | |
795 | * - qh has a polling interval | |
796 | * | |
797 | * For control/bulk requests, the HC or TT handles these. | |
798 | */ | |
799 | if (type == PIPE_INTERRUPT) { | |
d0ce5c6b AS |
800 | unsigned tmp; |
801 | ||
ffa0248e | 802 | qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, |
340ba5f9 DB |
803 | is_input, 0, |
804 | hb_mult(maxp) * max_packet(maxp))); | |
ffa0248e | 805 | qh->ps.phase = NO_FRAME; |
1da177e4 LT |
806 | |
807 | if (urb->dev->speed == USB_SPEED_HIGH) { | |
ffa0248e | 808 | qh->ps.c_usecs = 0; |
1da177e4 LT |
809 | qh->gap_uf = 0; |
810 | ||
ffa0248e | 811 | if (urb->interval > 1 && urb->interval < 8) { |
1da177e4 LT |
812 | /* NOTE interval 2 or 4 uframes could work. |
813 | * But interval 1 scheduling is simpler, and | |
814 | * includes high bandwidth. | |
815 | */ | |
1b9a38bf | 816 | urb->interval = 1; |
ffa0248e AS |
817 | } else if (urb->interval > ehci->periodic_size << 3) { |
818 | urb->interval = ehci->periodic_size << 3; | |
1da177e4 | 819 | } |
ffa0248e | 820 | qh->ps.period = urb->interval >> 3; |
d0ce5c6b AS |
821 | |
822 | /* period for bandwidth allocation */ | |
823 | tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE, | |
824 | 1 << (urb->ep->desc.bInterval - 1)); | |
825 | ||
826 | /* Allow urb->interval to override */ | |
827 | qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval); | |
828 | qh->ps.bw_period = qh->ps.bw_uperiod >> 3; | |
1da177e4 | 829 | } else { |
d0384200 | 830 | int think_time; |
831 | ||
1da177e4 LT |
832 | /* gap is f(FS/LS transfer times) */ |
833 | qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, | |
834 | is_input, 0, maxp) / (125 * 1000); | |
835 | ||
836 | /* FIXME this just approximates SPLIT/CSPLIT times */ | |
837 | if (is_input) { // SPLIT, gap, CSPLIT+DATA | |
ffa0248e AS |
838 | qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0); |
839 | qh->ps.usecs = HS_USECS(1); | |
1da177e4 | 840 | } else { // SPLIT+DATA, gap, CSPLIT |
ffa0248e AS |
841 | qh->ps.usecs += HS_USECS(1); |
842 | qh->ps.c_usecs = HS_USECS(0); | |
1da177e4 LT |
843 | } |
844 | ||
d0384200 | 845 | think_time = tt ? tt->think_time : 0; |
ffa0248e | 846 | qh->ps.tt_usecs = NS_TO_US(think_time + |
d0384200 | 847 | usb_calc_bus_time (urb->dev->speed, |
848 | is_input, 0, max_packet (maxp))); | |
ffa0248e AS |
849 | if (urb->interval > ehci->periodic_size) |
850 | urb->interval = ehci->periodic_size; | |
851 | qh->ps.period = urb->interval; | |
d0ce5c6b AS |
852 | |
853 | /* period for bandwidth allocation */ | |
854 | tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES, | |
855 | urb->ep->desc.bInterval); | |
856 | tmp = rounddown_pow_of_two(tmp); | |
857 | ||
858 | /* Allow urb->interval to override */ | |
859 | qh->ps.bw_period = min_t(unsigned, tmp, urb->interval); | |
860 | qh->ps.bw_uperiod = qh->ps.bw_period << 3; | |
1da177e4 LT |
861 | } |
862 | } | |
863 | ||
864 | /* support for tt scheduling, and access to toggles */ | |
ffa0248e AS |
865 | qh->ps.udev = urb->dev; |
866 | qh->ps.ep = urb->ep; | |
1da177e4 LT |
867 | |
868 | /* using TT? */ | |
869 | switch (urb->dev->speed) { | |
870 | case USB_SPEED_LOW: | |
4c53de72 | 871 | info1 |= QH_LOW_SPEED; |
1da177e4 LT |
872 | /* FALL THROUGH */ |
873 | ||
874 | case USB_SPEED_FULL: | |
875 | /* EPS 0 means "full" */ | |
876 | if (type != PIPE_INTERRUPT) | |
877 | info1 |= (EHCI_TUNE_RL_TT << 28); | |
878 | if (type == PIPE_CONTROL) { | |
4c53de72 AS |
879 | info1 |= QH_CONTROL_EP; /* for TT */ |
880 | info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ | |
1da177e4 LT |
881 | } |
882 | info1 |= maxp << 16; | |
883 | ||
884 | info2 |= (EHCI_TUNE_MULT_TT << 30); | |
8cd42e97 KG |
885 | |
886 | /* Some Freescale processors have an erratum in which the | |
887 | * port number in the queue head was 0..N-1 instead of 1..N. | |
888 | */ | |
889 | if (ehci_has_fsl_portno_bug(ehci)) | |
890 | info2 |= (urb->dev->ttport-1) << 23; | |
891 | else | |
892 | info2 |= urb->dev->ttport << 23; | |
1da177e4 LT |
893 | |
894 | /* set the address of the TT; for TDI's integrated | |
895 | * root hub tt, leave it zeroed. | |
896 | */ | |
340ba5f9 DB |
897 | if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub) |
898 | info2 |= tt->hub->devnum << 16; | |
1da177e4 LT |
899 | |
900 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ | |
901 | ||
902 | break; | |
903 | ||
904 | case USB_SPEED_HIGH: /* no TT involved */ | |
4c53de72 | 905 | info1 |= QH_HIGH_SPEED; |
1da177e4 LT |
906 | if (type == PIPE_CONTROL) { |
907 | info1 |= (EHCI_TUNE_RL_HS << 28); | |
908 | info1 |= 64 << 16; /* usb2 fixed maxpacket */ | |
4c53de72 | 909 | info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ |
1da177e4 LT |
910 | info2 |= (EHCI_TUNE_MULT_HS << 30); |
911 | } else if (type == PIPE_BULK) { | |
912 | info1 |= (EHCI_TUNE_RL_HS << 28); | |
caa9ef67 DB |
913 | /* The USB spec says that high speed bulk endpoints |
914 | * always use 512 byte maxpacket. But some device | |
915 | * vendors decided to ignore that, and MSFT is happy | |
916 | * to help them do so. So now people expect to use | |
917 | * such nonconformant devices with Linux too; sigh. | |
918 | */ | |
919 | info1 |= max_packet(maxp) << 16; | |
1da177e4 LT |
920 | info2 |= (EHCI_TUNE_MULT_HS << 30); |
921 | } else { /* PIPE_INTERRUPT */ | |
922 | info1 |= max_packet (maxp) << 16; | |
923 | info2 |= hb_mult (maxp) << 30; | |
924 | } | |
925 | break; | |
926 | default: | |
82491c2a GKH |
927 | ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev, |
928 | urb->dev->speed); | |
1da177e4 | 929 | done: |
c83e1a9f | 930 | qh_destroy(ehci, qh); |
1da177e4 LT |
931 | return NULL; |
932 | } | |
933 | ||
934 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ | |
935 | ||
c1fdb68e | 936 | /* init as live, toggle clear */ |
1da177e4 | 937 | qh->qh_state = QH_STATE_IDLE; |
3807e26d AD |
938 | hw = qh->hw; |
939 | hw->hw_info1 = cpu_to_hc32(ehci, info1); | |
940 | hw->hw_info2 = cpu_to_hc32(ehci, info2); | |
e04f5f7e | 941 | qh->is_out = !is_input; |
a455212d | 942 | usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); |
1da177e4 LT |
943 | return qh; |
944 | } | |
945 | ||
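/*
 * Worked example (editorial): for a high-speed bulk OUT endpoint 1 on
 * device address 2 with a 512-byte maxpacket, the code above builds
 *
 *	info1 = (EHCI_TUNE_RL_HS << 28) | (512 << 16) | QH_HIGH_SPEED
 *			| (1 << 8) | 2;
 *	info2 = (EHCI_TUNE_MULT_HS << 30);
 *
 * i.e. NAK reload count, maxpacket, endpoint speed, endpoint number and
 * device address packed into hw_info1, and the transaction multiplier
 * into hw_info2.
 */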
946 | /*-------------------------------------------------------------------------*/ | |
947 | ||
31446610 AS |
948 | static void enable_async(struct ehci_hcd *ehci) |
949 | { | |
950 | if (ehci->async_count++) | |
951 | return; | |
952 | ||
953 | /* Stop waiting to turn off the async schedule */ | |
954 | ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC); | |
955 | ||
956 | /* Don't start the schedule until ASS is 0 */ | |
957 | ehci_poll_ASS(ehci); | |
18aafe64 | 958 | turn_on_io_watchdog(ehci); |
31446610 AS |
959 | } |
960 | ||
961 | static void disable_async(struct ehci_hcd *ehci) | |
962 | { | |
963 | if (--ehci->async_count) | |
964 | return; | |
965 | ||
6e018751 AS |
966 | /* The async schedule and unlink lists are supposed to be empty */ |
967 | WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) || | |
214ac7a0 | 968 | !list_empty(&ehci->async_idle)); |
31446610 AS |
969 | |
970 | /* Don't turn off the schedule until ASS is 1 */ | |
971 | ehci_poll_ASS(ehci); | |
972 | } | |
973 | ||
1da177e4 LT |
974 | /* move qh (and its qtds) onto async queue; maybe enable queue. */ |
975 | ||
976 | static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | |
977 | { | |
6dbd682b | 978 | __hc32 dma = QH_NEXT(ehci, qh->qh_dma); |
1da177e4 LT |
979 | struct ehci_qh *head; |
980 | ||
914b7012 AS |
981 | /* Don't link a QH if there's a Clear-TT-Buffer pending */ |
982 | if (unlikely(qh->clearing_tt)) | |
983 | return; | |
984 | ||
3a44494e AS |
985 | WARN_ON(qh->qh_state != QH_STATE_IDLE); |
986 | ||
a455212d | 987 | /* clear halt and/or toggle; and maybe recover from silicon quirk */ |
3a44494e | 988 | qh_refresh(ehci, qh); |
1da177e4 LT |
989 | |
990 | /* splice right after start */ | |
31446610 | 991 | head = ehci->async; |
1da177e4 | 992 | qh->qh_next = head->qh_next; |
3807e26d | 993 | qh->hw->hw_next = head->hw->hw_next; |
1da177e4 LT |
994 | wmb (); |
995 | ||
996 | head->qh_next.qh = qh; | |
3807e26d | 997 | head->hw->hw_next = dma; |
1da177e4 LT |
998 | |
999 | qh->qh_state = QH_STATE_LINKED; | |
7bc782d7 AS |
1000 | qh->xacterrs = 0; |
1001 | qh->exception = 0; | |
1da177e4 | 1002 | /* qtd completions reported later by interrupt */ |
31446610 AS |
1003 | |
1004 | enable_async(ehci); | |
1da177e4 LT |
1005 | } |
1006 | ||
1007 | /*-------------------------------------------------------------------------*/ | |
1008 | ||
1da177e4 LT |
1009 | /* |
1010 | * For control/bulk/interrupt, return QH with these TDs appended. | |
1011 | * Allocates and initializes the QH if necessary. | |
1012 | * Returns null if it can't allocate a QH it needs to. | |
1013 | * If the QH has TDs (urbs) already, that's great. | |
1014 | */ | |
1015 | static struct ehci_qh *qh_append_tds ( | |
1016 | struct ehci_hcd *ehci, | |
1017 | struct urb *urb, | |
1018 | struct list_head *qtd_list, | |
1019 | int epnum, | |
1020 | void **ptr | |
1021 | ) | |
1022 | { | |
1023 | struct ehci_qh *qh = NULL; | |
fd05e720 | 1024 | __hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f); |
1da177e4 LT |
1025 | |
1026 | qh = (struct ehci_qh *) *ptr; | |
1027 | if (unlikely (qh == NULL)) { | |
1028 | /* can't sleep here, we have ehci->lock... */ | |
1029 | qh = qh_make (ehci, urb, GFP_ATOMIC); | |
1030 | *ptr = qh; | |
1031 | } | |
1032 | if (likely (qh != NULL)) { | |
1033 | struct ehci_qtd *qtd; | |
1034 | ||
1035 | if (unlikely (list_empty (qtd_list))) | |
1036 | qtd = NULL; | |
1037 | else | |
1038 | qtd = list_entry (qtd_list->next, struct ehci_qtd, | |
1039 | qtd_list); | |
1040 | ||
1041 | /* control qh may need patching ... */ | |
1042 | if (unlikely (epnum == 0)) { | |
1043 | ||
1044 | /* usb_reset_device() briefly reverts to address 0 */ | |
1045 | if (usb_pipedevice (urb->pipe) == 0) | |
3807e26d | 1046 | qh->hw->hw_info1 &= ~qh_addr_mask; |
1da177e4 LT |
1047 | } |
1048 | ||
1049 | /* just one way to queue requests: swap with the dummy qtd. | |
1050 | * only hc or qh_refresh() ever modify the overlay. | |
1051 | */ | |
1052 | if (likely (qtd != NULL)) { | |
1053 | struct ehci_qtd *dummy; | |
1054 | dma_addr_t dma; | |
6dbd682b | 1055 | __hc32 token; |
1da177e4 LT |
1056 | |
1057 | /* to avoid racing the HC, use the dummy td instead of | |
1058 | * the first td of our list (becomes new dummy). both | |
1059 | * tds stay deactivated until we're done, when the | |
1060 | * HC is allowed to fetch the old dummy (4.10.2). | |
1061 | */ | |
1062 | token = qtd->hw_token; | |
6dbd682b | 1063 | qtd->hw_token = HALT_BIT(ehci); |
41f05ded | 1064 | |
1da177e4 LT |
1065 | dummy = qh->dummy; |
1066 | ||
1067 | dma = dummy->qtd_dma; | |
1068 | *dummy = *qtd; | |
1069 | dummy->qtd_dma = dma; | |
1070 | ||
1071 | list_del (&qtd->qtd_list); | |
1072 | list_add (&dummy->qtd_list, qtd_list); | |
7d283aee | 1073 | list_splice_tail(qtd_list, &qh->qtd_list); |
1da177e4 | 1074 | |
6dbd682b | 1075 | ehci_qtd_init(ehci, qtd, qtd->qtd_dma); |
1da177e4 LT |
1076 | qh->dummy = qtd; |
1077 | ||
1078 | /* hc must see the new dummy at list end */ | |
1079 | dma = qtd->qtd_dma; | |
1080 | qtd = list_entry (qh->qtd_list.prev, | |
1081 | struct ehci_qtd, qtd_list); | |
6dbd682b | 1082 | qtd->hw_next = QTD_NEXT(ehci, dma); |
1da177e4 LT |
1083 | |
1084 | /* let the hc process these next qtds */ | |
1085 | wmb (); | |
1086 | dummy->hw_token = token; | |
1087 | ||
c83e1a9f | 1088 | urb->hcpriv = qh; |
1da177e4 LT |
1089 | } |
1090 | } | |
1091 | return qh; | |
1092 | } | |
1093 | ||
1094 | /*-------------------------------------------------------------------------*/ | |
1095 | ||
1096 | static int | |
1097 | submit_async ( | |
1098 | struct ehci_hcd *ehci, | |
1da177e4 LT |
1099 | struct urb *urb, |
1100 | struct list_head *qtd_list, | |
55016f10 | 1101 | gfp_t mem_flags |
1da177e4 | 1102 | ) { |
1da177e4 LT |
1103 | int epnum; |
1104 | unsigned long flags; | |
1105 | struct ehci_qh *qh = NULL; | |
e9df41c5 | 1106 | int rc; |
1da177e4 | 1107 | |
e9df41c5 | 1108 | epnum = urb->ep->desc.bEndpointAddress; |
1da177e4 LT |
1109 | |
1110 | #ifdef EHCI_URB_TRACE | |
eb34a908 DD |
1111 | { |
1112 | struct ehci_qtd *qtd; | |
1113 | qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list); | |
1114 | ehci_dbg(ehci, | |
1115 | "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", | |
1116 | __func__, urb->dev->devpath, urb, | |
1117 | epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", | |
1118 | urb->transfer_buffer_length, | |
1119 | qtd, urb->ep->hcpriv); | |
1120 | } | |
1da177e4 LT |
1121 | #endif |
1122 | ||
1123 | spin_lock_irqsave (&ehci->lock, flags); | |
541c7d43 | 1124 | if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { |
8de98402 BH |
1125 | rc = -ESHUTDOWN; |
1126 | goto done; | |
1127 | } | |
e9df41c5 AS |
1128 | rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); |
1129 | if (unlikely(rc)) | |
1130 | goto done; | |
8de98402 | 1131 | |
e9df41c5 | 1132 | qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); |
8de98402 | 1133 | if (unlikely(qh == NULL)) { |
e9df41c5 | 1134 | usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); |
8de98402 BH |
1135 | rc = -ENOMEM; |
1136 | goto done; | |
1137 | } | |
1da177e4 LT |
1138 | |
1139 | /* Control/bulk operations through TTs don't need scheduling, | |
1140 | * the HC and TT handle it when the TT has a buffer ready. | |
1141 | */ | |
8de98402 | 1142 | if (likely (qh->qh_state == QH_STATE_IDLE)) |
7a0f0d95 | 1143 | qh_link_async(ehci, qh); |
8de98402 | 1144 | done: |
1da177e4 | 1145 | spin_unlock_irqrestore (&ehci->lock, flags); |
8de98402 | 1146 | if (unlikely (qh == NULL)) |
1da177e4 | 1147 | qtd_list_free (ehci, urb, qtd_list); |
8de98402 | 1148 | return rc; |
1da177e4 LT |
1149 | } |
1150 | ||
1151 | /*-------------------------------------------------------------------------*/ | |
726a85ca | 1152 | #ifdef CONFIG_USB_HCD_TEST_MODE |
9841f37a MG | 1153 | /* |
1154 | * This function creates the qtds and submits them for the | |
1155 | * SINGLE_STEP_SET_FEATURE Test. | |
1156 | * This is done in two parts: first SETUP req for GetDesc is sent then | |
1157 | * 15 seconds later, the IN stage for GetDesc starts to req data from dev | |
1158 | * | |
1159 | * is_setup : i/p arguement decides which of the two stage needs to be | |
1160 | * performed; TRUE - SETUP and FALSE - IN+STATUS | |
1161 | * Returns 0 if success | |
1162 | */ | |
1163 | static int submit_single_step_set_feature( | |
1164 | struct usb_hcd *hcd, | |
1165 | struct urb *urb, | |
1166 | int is_setup | |
1167 | ) { | |
1168 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | |
1169 | struct list_head qtd_list; | |
1170 | struct list_head *head; | |
1171 | ||
1172 | struct ehci_qtd *qtd, *qtd_prev; | |
1173 | dma_addr_t buf; | |
1174 | int len, maxpacket; | |
1175 | u32 token; | |
1176 | ||
1177 | INIT_LIST_HEAD(&qtd_list); | |
1178 | head = &qtd_list; | |
1179 | ||
1180 | /* URBs map to sequences of QTDs: one logical transaction */ | |
1181 | qtd = ehci_qtd_alloc(ehci, GFP_KERNEL); | |
1182 | if (unlikely(!qtd)) | |
1183 | return -1; | |
1184 | list_add_tail(&qtd->qtd_list, head); | |
1185 | qtd->urb = urb; | |
1186 | ||
1187 | token = QTD_STS_ACTIVE; | |
1188 | token |= (EHCI_TUNE_CERR << 10); | |
1189 | ||
1190 | len = urb->transfer_buffer_length; | |
1191 | /* | |
1192 | * Check if the request is to perform just the SETUP stage (getDesc) | |
1193 | * as in SINGLE_STEP_SET_FEATURE test, DATA stage (IN) happens | |
1194 | * 15 secs after the setup | |
1195 | */ | |
1196 | if (is_setup) { | |
1197 | /* SETUP pid */ | |
1198 | qtd_fill(ehci, qtd, urb->setup_dma, | |
1199 | sizeof(struct usb_ctrlrequest), | |
1200 | token | (2 /* "setup" */ << 8), 8); | |
1201 | ||
1202 | submit_async(ehci, urb, &qtd_list, GFP_ATOMIC); | |
1203 | return 0; /*Return now; we shall come back after 15 seconds*/ | |
1204 | } | |
1205 | ||
1206 | /* | |
1207 | * IN: data transfer stage: buffer setup: start the IN transaction phase | |
1208 | * for the GetDescriptor SETUP which was sent 15 seconds earlier | |
1209 | */ | |
1210 | token ^= QTD_TOGGLE; /* We need to start IN with the DATA1 PID sequence */ |
1211 | buf = urb->transfer_dma; | |
1212 | ||
1213 | token |= (1 /* "in" */ << 8); /*This is IN stage*/ | |
1214 | ||
1215 | maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0)); | |
1216 | ||
1217 | qtd_fill(ehci, qtd, buf, len, token, maxpacket); | |
1218 | ||
1219 | /* | |
1220 | * Our IN phase shall always be a short read; so keep the queue running | |
1221 | * and let it advance to the next qtd, which is the zero-length OUT status | |
1222 | */ | |
1223 | qtd->hw_alt_next = EHCI_LIST_END(ehci); | |
1224 | ||
1225 | /* STATUS stage for GetDesc control request */ | |
1226 | token ^= 0x0100; /* "in" <--> "out" */ | |
1227 | token |= QTD_TOGGLE; /* force DATA1 */ | |
1228 | ||
1229 | qtd_prev = qtd; | |
1230 | qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC); | |
1231 | if (unlikely(!qtd)) | |
1232 | goto cleanup; | |
1233 | qtd->urb = urb; | |
1234 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); | |
1235 | list_add_tail(&qtd->qtd_list, head); | |
1236 | ||
1237 | /* don't fill any data in such packets */ |
1238 | qtd_fill(ehci, qtd, 0, 0, token, 0); | |
1239 | ||
1240 | /* by default, enable interrupt on urb completion */ | |
1241 | if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT))) | |
1242 | qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); | |
1243 | ||
1244 | submit_async(ehci, urb, &qtd_list, GFP_KERNEL); | |
1245 | ||
1246 | return 0; | |
1247 | ||
1248 | cleanup: | |
1249 | qtd_list_free(ehci, urb, head); | |
1250 | return -1; | |
1251 | } | |
726a85ca | 1252 | #endif /* CONFIG_USB_HCD_TEST_MODE */ |
9841f37a MG |
1253 | |
1254 | /*-------------------------------------------------------------------------*/ | |
1da177e4 | 1255 | |
3c273a05 | 1256 | static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh) |
1da177e4 | 1257 | { |
3c273a05 | 1258 | struct ehci_qh *prev; |
1da177e4 | 1259 | |
3c273a05 | 1260 | /* Add to the end of the list of QHs waiting for the next IAAD */ |
6402c796 | 1261 | qh->qh_state = QH_STATE_UNLINK_WAIT; |
6e018751 | 1262 | list_add_tail(&qh->unlink_node, &ehci->async_unlink); |
3c273a05 AS |
1263 | |
1264 | /* Unlink it from the schedule */ | |
1265 | prev = ehci->async; | |
1266 | while (prev->qh_next.qh != qh) | |
1267 | prev = prev->qh_next.qh; | |
1268 | ||
1269 | prev->hw->hw_next = qh->hw->hw_next; | |
1270 | prev->qh_next = qh->qh_next; | |
1271 | if (ehci->qh_scan_next == qh) | |
1272 | ehci->qh_scan_next = qh->qh_next.qh; | |
1273 | } | |
1da177e4 | 1274 | |
214ac7a0 | 1275 | static void start_iaa_cycle(struct ehci_hcd *ehci) |
3c273a05 | 1276 | { |
214ac7a0 AS |
1277 | /* Do nothing if an IAA cycle is already running */ |
1278 | if (ehci->iaa_in_progress) | |
3c273a05 | 1279 | return; |
214ac7a0 | 1280 | ehci->iaa_in_progress = true; |
1da177e4 | 1281 | |
3c273a05 AS |
1282 | /* If the controller isn't running, we don't have to wait for it */ |
1283 | if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { | |
214ac7a0 | 1284 | end_unlink_async(ehci); |
31446610 | 1285 | |
3c273a05 | 1286 | /* Otherwise start a new IAA cycle */ |
32830f20 | 1287 | } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) { |
6e0c3339 | 1288 | |
3c273a05 AS |
1289 | /* Make sure the unlinks are all visible to the hardware */ |
1290 | wmb(); | |
1da177e4 | 1291 | |
3c273a05 AS |
1292 | ehci_writel(ehci, ehci->command | CMD_IAAD, |
1293 | &ehci->regs->command); | |
1294 | ehci_readl(ehci, &ehci->regs->command); | |
1295 | ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true); | |
1da177e4 | 1296 | } |
3c273a05 AS |
1297 | } |
1298 | ||
1299 | /* the async qh for the qtds being unlinked are now gone from the HC */ | |
1300 | ||
1301 | static void end_unlink_async(struct ehci_hcd *ehci) | |
1302 | { | |
1303 | struct ehci_qh *qh; | |
214ac7a0 | 1304 | bool early_exit; |
2f7ac6c1 GJ |
1305 | |
1306 | if (ehci->has_synopsys_hc_bug) | |
1307 | ehci_writel(ehci, (u32) ehci->async->qh_dma, | |
1308 | &ehci->regs->async_next); | |
3c273a05 | 1309 | |
214ac7a0 AS |
1310 | /* The current IAA cycle has ended */ |
1311 | ehci->iaa_in_progress = false; | |
1312 | ||
1313 | if (list_empty(&ehci->async_unlink)) | |
1314 | return; | |
1315 | qh = list_first_entry(&ehci->async_unlink, struct ehci_qh, | |
1316 | unlink_node); /* QH whose IAA cycle just ended */ | |
1317 | ||
1318 | /* | |
1319 | * If async_unlinking is set then this routine is already running, | |
1320 | * either on the stack or on another CPU. | |
1321 | */ | |
1322 | early_exit = ehci->async_unlinking; | |
1323 | ||
1324 | /* If the controller isn't running, process all the waiting QHs */ | |
1325 | if (ehci->rh_state < EHCI_RH_RUNNING) | |
1326 | list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle); | |
1327 | ||
1328 | /* | |
1329 | * Intel (?) bug: The HC can write back the overlay region even | |
1330 | * after the IAA interrupt occurs. In self-defense, always go | |
1331 | * through two IAA cycles for each QH. | |
1332 | */ | |
1333 | else if (qh->qh_state == QH_STATE_UNLINK_WAIT) { | |
1334 | qh->qh_state = QH_STATE_UNLINK; | |
1335 | early_exit = true; | |
1336 | } | |
1337 | ||
1338 | /* Otherwise process only the first waiting QH (NVIDIA bug?) */ | |
1339 | else | |
1340 | list_move_tail(&qh->unlink_node, &ehci->async_idle); | |
1341 | ||
1342 | /* Start a new IAA cycle if any QHs are waiting for it */ | |
1343 | if (!list_empty(&ehci->async_unlink)) | |
1344 | start_iaa_cycle(ehci); | |
1345 | ||
1346 | /* | |
1347 | * Don't allow nesting or concurrent calls, | |
1348 | * or wait for the second IAA cycle for the next QH. | |
1349 | */ | |
1350 | if (early_exit) | |
1351 | return; | |
1352 | ||
3c273a05 | 1353 | /* Process the idle QHs */ |
3c273a05 | 1354 | ehci->async_unlinking = true; |
214ac7a0 AS |
1355 | while (!list_empty(&ehci->async_idle)) { |
1356 | qh = list_first_entry(&ehci->async_idle, struct ehci_qh, | |
6e018751 AS |
1357 | unlink_node); |
1358 | list_del(&qh->unlink_node); | |
3c273a05 AS |
1359 | |
1360 | qh->qh_state = QH_STATE_IDLE; | |
1361 | qh->qh_next.qh = NULL; | |
1362 | ||
79bcf7b0 AS |
1363 | if (!list_empty(&qh->qtd_list)) |
1364 | qh_completions(ehci, qh); | |
3c273a05 AS |
1365 | if (!list_empty(&qh->qtd_list) && |
1366 | ehci->rh_state == EHCI_RH_RUNNING) | |
1367 | qh_link_async(ehci, qh); | |
1368 | disable_async(ehci); | |
1369 | } | |
1370 | ehci->async_unlinking = false; | |
1da177e4 LT |
1371 | } |
1372 | ||
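/*
 * Editorial summary of the async unlink life cycle implemented above and
 * below:
 *
 *	QH_STATE_LINKED
 *	  -> single_unlink_async(): QH removed from the hardware schedule,
 *	     state QH_STATE_UNLINK_WAIT, queued on ehci->async_unlink
 *	  -> start_iaa_cycle(): IAAD written, IAA watchdog armed
 *	  -> end_unlink_async(): after the second IAA cycle the QH moves to
 *	     ehci->async_idle, becomes QH_STATE_IDLE, its qtds are completed,
 *	     and it is relinked if new URBs arrived meanwhile.
 */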
6e0c3339 AS |
1373 | static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh); |
1374 | ||
32830f20 AS |
1375 | static void unlink_empty_async(struct ehci_hcd *ehci) |
1376 | { | |
6e0c3339 AS |
1377 | struct ehci_qh *qh; |
1378 | struct ehci_qh *qh_to_unlink = NULL; | |
6e0c3339 | 1379 | int count = 0; |
32830f20 | 1380 | |
6e0c3339 AS |
1381 | /* Find the last async QH which has been empty for a timer cycle */ |
1382 | for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) { | |
32830f20 AS |
1383 | if (list_empty(&qh->qtd_list) && |
1384 | qh->qh_state == QH_STATE_LINKED) { | |
6e0c3339 | 1385 | ++count; |
afc2c9a2 | 1386 | if (qh->unlink_cycle != ehci->async_unlink_cycle) |
6e0c3339 | 1387 | qh_to_unlink = qh; |
32830f20 AS |
1388 | } |
1389 | } | |
1390 | ||
6e0c3339 | 1391 | /* If nothing else is being unlinked, unlink the last empty QH */ |
214ac7a0 | 1392 | if (list_empty(&ehci->async_unlink) && qh_to_unlink) { |
6e0c3339 AS |
1393 | start_unlink_async(ehci, qh_to_unlink); |
1394 | --count; | |
1395 | } | |
32830f20 | 1396 | |
6e0c3339 AS |
1397 | /* Other QHs will be handled later */ |
1398 | if (count > 0) { | |
32830f20 AS |
1399 | ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); |
1400 | ++ehci->async_unlink_cycle; | |
1401 | } | |
1402 | } | |
1403 | ||
2a40f324 | 1404 | /* The root hub is suspended; unlink all the async QHs */ |
70b55c2a | 1405 | static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci) |
2a40f324 AS |
1406 | { |
1407 | struct ehci_qh *qh; | |
1408 | ||
1409 | while (ehci->async->qh_next.qh) { | |
1410 | qh = ehci->async->qh_next.qh; | |
1411 | WARN_ON(!list_empty(&qh->qtd_list)); | |
1412 | single_unlink_async(ehci, qh); | |
1413 | } | |
214ac7a0 | 1414 | start_iaa_cycle(ehci); |
2a40f324 AS |
1415 | } |
1416 | ||
1da177e4 LT |
1417 | /* makes sure the async qh will become idle */ |
1418 | /* caller must own ehci->lock */ | |
1419 | ||
3c273a05 | 1420 | static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh) |
1da177e4 | 1421 | { |
7bc782d7 AS |
1422 | /* If the QH isn't linked then there's nothing we can do. */ |
1423 | if (qh->qh_state != QH_STATE_LINKED) | |
1da177e4 | 1424 | return; |
1da177e4 | 1425 | |
3c273a05 | 1426 | single_unlink_async(ehci, qh); |
214ac7a0 | 1427 | start_iaa_cycle(ehci); |
1da177e4 LT |
1428 | } |
1429 | ||
1430 | /*-------------------------------------------------------------------------*/ | |
1431 | ||
7d12e780 | 1432 | static void scan_async (struct ehci_hcd *ehci) |
1da177e4 LT |
1433 | { |
1434 | struct ehci_qh *qh; | |
32830f20 | 1435 | bool check_unlinks_later = false; |
1da177e4 | 1436 | |
004c1968 AS |
1437 | ehci->qh_scan_next = ehci->async->qh_next.qh; |
1438 | while (ehci->qh_scan_next) { | |
1439 | qh = ehci->qh_scan_next; | |
1440 | ehci->qh_scan_next = qh->qh_next.qh; | |
79bcf7b0 | 1441 | |
004c1968 AS |
1442 | /* clean any finished work for this qh */ |
1443 | if (!list_empty(&qh->qtd_list)) { | |
1444 | int temp; | |
1445 | ||
1446 | /* | |
1447 | * Unlinks could happen here; completion reporting | |
1448 | * drops the lock. That's why ehci->qh_scan_next | |
1449 | * always holds the next qh to scan; if the next qh | |
1450 | * gets unlinked then ehci->qh_scan_next is adjusted | |
3c273a05 | 1451 | * in single_unlink_async(). |
1da177e4 | 1452 | */ |
004c1968 | 1453 | temp = qh_completions(ehci, qh); |
79bcf7b0 | 1454 | if (unlikely(temp)) { |
3c273a05 | 1455 | start_unlink_async(ehci, qh); |
32830f20 AS |
1456 | } else if (list_empty(&qh->qtd_list) |
1457 | && qh->qh_state == QH_STATE_LINKED) { | |
1458 | qh->unlink_cycle = ehci->async_unlink_cycle; | |
1459 | check_unlinks_later = true; | |
79bcf7b0 | 1460 | } |
004c1968 | 1461 | } |
32830f20 | 1462 | } |
1da177e4 | 1463 | |
32830f20 AS |
1464 | /* |
1465 | * Unlink empty entries, reducing DMA usage as well | |
1466 | * as HCD schedule-scanning costs. Delay for any qh | |
1467 | * we just scanned, there's a not-unusual case that it | |
1468 | * doesn't stay idle for long. | |
1469 | */ | |
1470 | if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING && | |
1471 | !(ehci->enabled_hrtimer_events & | |
1472 | BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) { | |
1473 | ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); | |
1474 | ++ehci->async_unlink_cycle; | |
1da177e4 | 1475 | } |
1da177e4 | 1476 | } |