/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/

/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		size_t len, int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}
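
/*
 * Worked example (editorial note, not part of the original source): a qtd
 * carries five buffer pointers, each covering one 4K page.  For a transfer
 * whose DMA address starts at page offset 0xe00, the first entry covers
 * only 0x1000 - 0xe00 = 0x200 bytes, so one qtd can queue at most
 * 0x200 + 4 * 0x1000 = 0x4200 bytes (~16.5KB); a 4K-aligned buffer gets
 * the full 5 * 0x1000 = 20KB.  That is the "16-20kB/entry" range cited in
 * the header comment, and why qtd_fill() returns the count it queued so
 * callers can loop over larger buffers.
 */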

/*-------------------------------------------------------------------------*/

static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	struct ehci_qh_hw *hw = qh->hw;

	/* writes to an active overlay are unsafe */
	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	hw->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
		unsigned	is_out, epnum;

		is_out = qh->is_out;
		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle(qh->ps.udev, epnum, is_out, 1);
		}
	}

	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}

/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);

	/*
	 * first qtd may already be partially processed.
	 * If we come here during unlink, the QH overlay region
	 * might have reference to the just unlinked qtd.  The
	 * qtd is updated in qh_completions().  Update the QH
	 * overlay here.
	 */
	if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
		qh->hw->hw_qtd_next = qtd->hw_next;
		if (qh->should_be_inactive)
			ehci_warn(ehci, "qh %p should be inactive!\n", qh);
	} else {
		qh_update(ehci, qh, qtd);
	}
	qh->should_be_inactive = 0;
}

/*-------------------------------------------------------------------------*/

static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct ehci_qh		*qh = ep->hcpriv;
	unsigned long		flags;

	spin_lock_irqsave(&ehci->lock, flags);
	qh->clearing_tt = 0;
	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
			&& ehci->rh_state == EHCI_RH_RUNNING)
		qh_link_async(ehci, qh);
	spin_unlock_irqrestore(&ehci->lock, flags);
}

static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
		struct urb *urb, u32 token)
{
	/* If an async split transaction gets an error or is unlinked,
	 * the TT buffer may be left in an indeterminate state.  We
	 * have to clear the TT buffer.
	 *
	 * Note: this routine is never called for Isochronous transfers.
	 */
	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef CONFIG_DYNAMIC_DEBUG
		struct usb_device *tt = urb->dev->tt->hub;

		dev_dbg(&tt->dev,
			"clear tt buffer port %d, a%d ep%d t%08x\n",
			urb->dev->ttport, urb->dev->devnum,
			usb_pipeendpoint(urb->pipe), token);
#endif /* CONFIG_DYNAMIC_DEBUG */
		if (!ehci_is_TDI(ehci)
				|| urb->dev->tt->hub !=
				   ehci_to_hcd(ehci)->self.root_hub) {
			if (usb_hub_clear_tt_buffer(urb) == 0)
				qh->clearing_tt = 1;
		} else {

			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
		}
	}
}

static int qtd_copy_status (
	struct ehci_hcd *ehci,
	struct urb *urb,
	size_t length,
	u32 token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			status = -EPIPE;

		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR  /* hc couldn't read data */
				: -ECOMM; /* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
				urb->dev->devpath,
				usb_pipeendpoint(urb->pipe),
				usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}
	}

	return status;
}
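
/*
 * Summary of the token-to-errno mapping above (editorial note, not part
 * of the original source):
 *
 *	BABBLE + HALT		-EOVERFLOW	device sent too much data
 *	CERR != 0 + HALT	-EPIPE		endpoint stalled
 *	MMF + HALT		-EPROTO		missed complete-split
 *	DBE + HALT		-ENOSR/-ECOMM	host couldn't read/write data
 *	XACT + HALT		-EPROTO		three transaction errors
 *	short read, no HALT	-EREMOTEIO	forces cleanup of later qtds
 */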

static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
{
	if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
		/* ... update hc-wide periodic stats */
		ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
	}

	if (unlikely(urb->unlinked)) {
		COUNT(ehci->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		COUNT(ehci->stats.complete);
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
}

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns nonzero if the caller should
 * unlink qh.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd		*last, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			last_status;
	int			stopped;
	u8			state;
	struct ehci_qh_hw	*hw = qh->hw;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 *
	 * It's a bug for qh->qh_state to be anything other than
	 * QH_STATE_IDLE, unless our caller is scan_async() or
	 * scan_intr().
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

 rescan:
	last = NULL;
	last_status = -EINPROGRESS;
	qh->dequeue_during_giveback = 0;

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done(ehci, last->urb, last_status);
				last_status = -EINPROGRESS;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = hc32_to_cpu(ehci, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
 retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {

			/* Report Data Buffer Error: non-fatal but useful */
			if (token & QTD_STS_DBE)
				ehci_dbg(ehci,
					"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
					urb,
					usb_endpoint_num(&urb->ep->desc),
					usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
					urb->transfer_buffer_length,
					qtd,
					qh);

			/* on STALL, error, and short reads this urb must
			 * complete and all its qtds must be recycled.
			 */
			if ((token & QTD_STS_HALT) != 0) {

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						++qh->xacterrs < QH_XACTERR_MAX &&
						!urb->unlinked) {
					ehci_dbg(ehci,
						"detected XactErr len %zu/%zu retry %d\n",
						qtd->length - QTD_LENGTH(token),
						qtd->length, qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
							(EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ehci,
							token);
					wmb();
					hw->hw_token = cpu_to_hc32(ehci,
							token);
					goto retry_xacterr;
				}
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& ehci->rh_state >= EHCI_RH_RUNNING)) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
			if (ehci->rh_state < EHCI_RH_RUNNING)
				last_status = -ESHUTDOWN;

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/*
			 * If this was the active qtd when the qh was unlinked
			 * and the overlay's token is active, then the overlay
			 * hasn't been written back to the qtd yet so use its
			 * token instead of the qtd's.  After the qtd is
			 * processed and removed, the overlay won't be valid
			 * any more.
			 */
			if (state == QH_STATE_IDLE &&
					qh->qtd_list.next == &qtd->qtd_list &&
					(hw->hw_token & ACTIVE_BIT(ehci))) {
				token = hc32_to_cpu(ehci, hw->hw_token);
				hw->hw_token &= ~ACTIVE_BIT(ehci);
				qh->should_be_inactive = 1;

				/* An unlink may leave an incomplete
				 * async transaction in the TT buffer.
				 * We have to clear it.
				 */
				ehci_clear_tt_buffer(ehci, qh, urb, token);
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred.  in common short read
		 * cases with only one data qtd (including control transfers),
		 * queue processing won't halt.  but with two or more qtds (for
		 * example, with a 32 KB transfer), when the first qtd gets a
		 * short read the second must be removed by hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(ehci, urb,
					qtd->length, token);
			if (last_status == -EREMOTEIO
					&& (qtd->hw_alt_next
						& EHCI_LIST_END(ehci)))
				last_status = -EINPROGRESS;

			/* As part of low/full-speed endpoint-halt processing
			 * we must clear the TT buffer (11.17.5).
			 */
			if (unlikely(last_status != -EINPROGRESS &&
					last_status != -EREMOTEIO)) {
				/* The TTs in some hubs malfunction when they
				 * receive this request following a STALL (they
				 * stop sending isochronous packets).  Since a
				 * STALL can't leave the TT buffer in a busy
				 * state (if you believe Figures 11-48 - 11-51
				 * in the USB 2.0 spec), we won't clear the TT
				 * buffer in this case.  Strictly speaking this
				 * is a violation of the spec.
				 */
				if (last_status != -EPIPE)
					ehci_clear_tt_buffer(ehci, qh, urb,
							token);
			}
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del (&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = 0;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		ehci_qtd_free (ehci, last);
	}

	/* Do we need to rescan for URBs dequeued during a giveback? */
	if (unlikely(qh->dequeue_during_giveback)) {
		/* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;

		/* Otherwise the caller must unlink the QH. */
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 *
	 * We won't refresh a QH that's linked (after the HC
	 * stopped the queue).  That avoids a race:
	 *  - HC reads first part of QH;
	 *  - CPU updates that first part and the token;
	 *  - HC reads rest of that QH, including token
	 * Result:  HC gets an inconsistent image, and then
	 * DMAs to/from the wrong memory (corrupting it).
	 *
	 * That should be rare for interrupt transfers,
	 * except maybe high bandwidth ...
	 */
	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
		qh->exception = 1;

	/* Let the caller know if the QH needs to be unlinked. */
	return qh->exception;
}

/*-------------------------------------------------------------------------*/

// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
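
/*
 * Example (editorial note, not part of the original source): a
 * high-bandwidth interrupt endpoint reporting wMaxPacketSize 0x1400
 * encodes "3 transactions of 1024 bytes per microframe":
 * max_packet(0x1400) = 0x400 = 1024, and
 * hb_mult(0x1400) = 1 + ((0x1400 >> 11) & 0x03) = 3.
 */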

/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}

/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, this_sg_len, maxpacket;
	int			is_input;
	u32			token;
	int			i;
	struct scatterlist	*sg;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof (struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	i = urb->num_mapped_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		buf = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		buf = urb->transfer_dma;
		this_sg_len = len;
	}

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
				maxpacket);
		this_sg_len -= this_qtd_len;
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(this_sg_len <= 0)) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			buf = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/*
	 * control requests may need a terminating data "status" ack;
	 * other OUT ones may need a terminating short packet
	 * (zero length).
	 */
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipeout(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ehci, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}
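
/*
 * Illustration (editorial note, not part of the original source): for a
 * control transfer with a data stage, qh_urb_transaction() builds a
 * three-part chain, e.g. for an 18-byte GET_DESCRIPTOR:
 *
 *	qtd 1:  SETUP pid, 8-byte usb_ctrlrequest, DATA0
 *	qtd 2:  IN pid, 18 bytes, DATA1
 *	qtd 3:  OUT pid, zero-length status ack, forced DATA1
 *
 * Bulk URBs skip the SETUP and status qtds and just wrap the (possibly
 * scatter-gathered) buffer in as many data qtds as qtd_fill() requires,
 * plus an optional zero-length packet when URB_ZERO_PACKET is set.
 */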

/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)


/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	struct usb_tt		*tt = urb->dev->tt;
	struct ehci_qh_hw	*hw;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (max_packet(maxp) > 1024) {
		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
		goto done;
	}

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		unsigned	tmp;

		qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->ps.phase = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->ps.c_usecs = 0;
			qh->gap_uf = 0;

			if (urb->interval > 1 && urb->interval < 8) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				urb->interval = 1;
			} else if (urb->interval > ehci->periodic_size << 3) {
				urb->interval = ehci->periodic_size << 3;
			}
			qh->ps.period = urb->interval >> 3;

			/* period for bandwidth allocation */
			tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
					1 << (urb->ep->desc.bInterval - 1));

			/* Allow urb->interval to override */
			qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
			qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
		} else {
			int		think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
				qh->ps.usecs = HS_USECS(1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->ps.usecs += HS_USECS(1);
				qh->ps.c_usecs = HS_USECS(0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->ps.tt_usecs = NS_TO_US(think_time +
					usb_calc_bus_time (urb->dev->speed,
					is_input, 0, max_packet (maxp)));
			if (urb->interval > ehci->periodic_size)
				urb->interval = ehci->periodic_size;
			qh->ps.period = urb->interval;

			/* period for bandwidth allocation */
			tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
					urb->ep->desc.bInterval);
			tmp = rounddown_pow_of_two(tmp);

			/* Allow urb->interval to override */
			qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
			qh->ps.bw_uperiod = qh->ps.bw_period << 3;
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->ps.udev = urb->dev;
	qh->ps.ep = urb->ep;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= QH_LOW_SPEED;
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= QH_CONTROL_EP;		/* for TT */
			info1 |= QH_TOGGLE_CTL;		/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= QH_HIGH_SPEED;
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= QH_TOGGLE_CTL;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= max_packet(maxp) << 16;
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;
	default:
		ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
			urb->dev->speed);
done:
		qh_destroy(ehci, qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear */
	qh->qh_state = QH_STATE_IDLE;
	hw = qh->hw;
	hw->hw_info1 = cpu_to_hc32(ehci, info1);
	hw->hw_info2 = cpu_to_hc32(ehci, info2);
	qh->is_out = !is_input;
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	return qh;
}
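
/*
 * Example (editorial note, not part of the original source): for a
 * high-speed bulk OUT endpoint 1 on device address 5 with 512-byte
 * maxpacket, the switch above yields
 *
 *	info1 = (EHCI_TUNE_RL_HS << 28) | (512 << 16) | QH_HIGH_SPEED
 *			| (1 << 8) | 5;
 *	info2 = (EHCI_TUNE_MULT_HS << 30);
 *
 * i.e. NAK reload limit, maxpacket, endpoint speed, endpoint number,
 * and device address packed into hw_info1, and the transaction
 * multiplier in hw_info2, per the EHCI spec's queue head layout
 * (section 3.6, if memory serves).
 */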

/*-------------------------------------------------------------------------*/

static void enable_async(struct ehci_hcd *ehci)
{
	if (ehci->async_count++)
		return;

	/* Stop waiting to turn off the async schedule */
	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);

	/* Don't start the schedule until ASS is 0 */
	ehci_poll_ASS(ehci);
	turn_on_io_watchdog(ehci);
}

static void disable_async(struct ehci_hcd *ehci)
{
	if (--ehci->async_count)
		return;

	/* The async schedule and unlink lists are supposed to be empty */
	WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
			!list_empty(&ehci->async_idle));

	/* Don't turn off the schedule until ASS is 1 */
	ehci_poll_ASS(ehci);
}

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
	struct ehci_qh	*head;

	/* Don't link a QH if there's a Clear-TT-Buffer pending */
	if (unlikely(qh->clearing_tt))
		return;

	WARN_ON(qh->qh_state != QH_STATE_IDLE);

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	qh_refresh(ehci, qh);

	/* splice right after start */
	head = ehci->async;
	qh->qh_next = head->qh_next;
	qh->hw->hw_next = head->hw->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	qh->xacterrs = 0;
	qh->exception = 0;
	/* qtd completions reported later by interrupt */

	enable_async(ehci);
}

/*-------------------------------------------------------------------------*/

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	__hc32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw->hw_info1 &= ~qh_addr_mask;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			__hc32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ehci);

			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			list_splice_tail(qtd_list, &qh->qtd_list);

			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(ehci, dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh;
		}
	}
	return qh;
}
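
/*
 * Illustration (editorial note, not part of the original source) of the
 * dummy-qtd swap above.  Before: the qh's chain ends in dummy D (halted),
 * and the new urb's list is A -> B.  A's contents are copied into D, A is
 * re-initialized as the new dummy at the end of the chain, and only then
 * is D's token made active:
 *
 *	before:  qh ... -> D(halt)	new urb:  A -> B
 *	after:   qh ... -> D(=old A) -> B -> A(new dummy, halt)
 *
 * Since the HC only ever observes a halted qtd turn active via one atomic
 * token write, it can never fetch a half-built chain (EHCI spec 4.10.2).
 */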

/*-------------------------------------------------------------------------*/

static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc;

	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	{
		struct ehci_qtd *qtd;
		qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
		ehci_dbg(ehci,
			 "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
			 __func__, urb->dev->devpath, urb,
			 epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
			 urb->transfer_buffer_length,
			 qtd, urb->ep->hcpriv);
	}
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		rc = -ESHUTDOWN;
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(rc))
		goto done;

	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async(ehci, qh);
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}
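
/*
 * Usage sketch (editorial note, not part of the original source): this
 * pairs with qh_urb_transaction() roughly the way ehci_urb_enqueue() in
 * ehci-hcd.c drives it for control and bulk traffic:
 */
#if 0	/* illustrative only, not compiled */
	struct list_head qtd_list;

	INIT_LIST_HEAD(&qtd_list);
	if (!qh_urb_transaction(ehci, urb, &qtd_list, mem_flags))
		return -ENOMEM;		/* nothing queued yet, safe to bail */
	return submit_async(ehci, urb, &qtd_list, mem_flags);
#endif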

/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_HCD_TEST_MODE
/*
 * This function creates the qtds and submits them for the
 * SINGLE_STEP_SET_FEATURE Test.
 * This is done in two parts: first the SETUP req for GetDesc is sent, then
 * 15 seconds later the IN stage for GetDesc starts to request data from dev.
 *
 * is_setup : input argument deciding which of the two stages needs to be
 * performed; TRUE - SETUP and FALSE - IN+STATUS
 * Returns 0 if success
 */
static int submit_single_step_set_feature(
	struct usb_hcd	*hcd,
	struct urb	*urb,
	int		is_setup
) {
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct list_head	qtd_list;
	struct list_head	*head;

	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, maxpacket;
	u32			token;

	INIT_LIST_HEAD(&qtd_list);
	head = &qtd_list;

	/* URBs map to sequences of QTDs: one logical transaction */
	qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
	if (unlikely(!qtd))
		return -1;
	list_add_tail(&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);

	len = urb->transfer_buffer_length;
	/*
	 * Check if the request is to perform just the SETUP stage (getDesc)
	 * as in SINGLE_STEP_SET_FEATURE test, DATA stage (IN) happens
	 * 15 secs after the setup
	 */
	if (is_setup) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof(struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
		return 0; /* Return now; we shall come back after 15 seconds */
	}

	/*
	 * IN: data transfer stage: buffer setup : start the IN txn phase for
	 * the get_Desc SETUP which was sent 15 seconds back
	 */
	token ^= QTD_TOGGLE;	/* We need to start IN with DATA-1 Pid-sequence */
	buf = urb->transfer_dma;

	token |= (1 /* "in" */ << 8);	/* This is IN stage */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));

	qtd_fill(ehci, qtd, buf, len, token, maxpacket);

	/*
	 * Our IN phase shall always be a short read; so keep the queue running
	 * and let it advance to the next qtd which zero length OUT status
	 */
	qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/* STATUS stage for GetDesc control request */
	token ^= 0x0100;	/* "in" <--> "out"  */
	token |= QTD_TOGGLE;	/* force DATA1 */

	qtd_prev = qtd;
	qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
	if (unlikely(!qtd))
		goto cleanup;
	qtd->urb = urb;
	qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
	list_add_tail(&qtd->qtd_list, head);

	/* don't fill any data in such packets */
	qtd_fill(ehci, qtd, 0, 0, token, 0);

	/* by default, enable interrupt on urb completion */
	if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);

	submit_async(ehci, urb, &qtd_list, GFP_KERNEL);

	return 0;

cleanup:
	qtd_list_free(ehci, urb, head);
	return -1;
}
#endif /* CONFIG_USB_HCD_TEST_MODE */

/*-------------------------------------------------------------------------*/

static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qh		*prev;

	/* Add to the end of the list of QHs waiting for the next IAAD */
	qh->qh_state = QH_STATE_UNLINK_WAIT;
	list_add_tail(&qh->unlink_node, &ehci->async_unlink);

	/* Unlink it from the schedule */
	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw->hw_next = qh->hw->hw_next;
	prev->qh_next = qh->qh_next;
	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = qh->qh_next.qh;
}

static void start_iaa_cycle(struct ehci_hcd *ehci)
{
	/* Do nothing if an IAA cycle is already running */
	if (ehci->iaa_in_progress)
		return;
	ehci->iaa_in_progress = true;

	/* If the controller isn't running, we don't have to wait for it */
	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
		end_unlink_async(ehci);

	/* Otherwise start a new IAA cycle */
	} else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {

		/* Make sure the unlinks are all visible to the hardware */
		wmb();

		ehci_writel(ehci, ehci->command | CMD_IAAD,
				&ehci->regs->command);
		ehci_readl(ehci, &ehci->regs->command);
		ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
	}
}

/* the async qh for the qtds being unlinked are now gone from the HC */

static void end_unlink_async(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	bool			early_exit;

	if (ehci->has_synopsys_hc_bug)
		ehci_writel(ehci, (u32) ehci->async->qh_dma,
			    &ehci->regs->async_next);

	/* The current IAA cycle has ended */
	ehci->iaa_in_progress = false;

	if (list_empty(&ehci->async_unlink))
		return;
	qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
			unlink_node);	/* QH whose IAA cycle just ended */

	/*
	 * If async_unlinking is set then this routine is already running,
	 * either on the stack or on another CPU.
	 */
	early_exit = ehci->async_unlinking;

	/* If the controller isn't running, process all the waiting QHs */
	if (ehci->rh_state < EHCI_RH_RUNNING)
		list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);

	/*
	 * Intel (?) bug: The HC can write back the overlay region even
	 * after the IAA interrupt occurs.  In self-defense, always go
	 * through two IAA cycles for each QH.
	 */
	else if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
		qh->qh_state = QH_STATE_UNLINK;
		early_exit = true;
	}

	/* Otherwise process only the first waiting QH (NVIDIA bug?) */
	else
		list_move_tail(&qh->unlink_node, &ehci->async_idle);

	/* Start a new IAA cycle if any QHs are waiting for it */
	if (!list_empty(&ehci->async_unlink))
		start_iaa_cycle(ehci);

	/*
	 * Don't allow nesting or concurrent calls,
	 * or wait for the second IAA cycle for the next QH.
	 */
	if (early_exit)
		return;

	/* Process the idle QHs */
	ehci->async_unlinking = true;
	while (!list_empty(&ehci->async_idle)) {
		qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
				unlink_node);
		list_del(&qh->unlink_node);

		qh->qh_state = QH_STATE_IDLE;
		qh->qh_next.qh = NULL;

		if (!list_empty(&qh->qtd_list))
			qh_completions(ehci, qh);
		if (!list_empty(&qh->qtd_list) &&
				ehci->rh_state == EHCI_RH_RUNNING)
			qh_link_async(ehci, qh);
		disable_async(ehci);
	}
	ehci->async_unlinking = false;
}

static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void unlink_empty_async(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	struct ehci_qh		*qh_to_unlink = NULL;
	int			count = 0;

	/* Find the last async QH which has been empty for a timer cycle */
	for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
		if (list_empty(&qh->qtd_list) &&
				qh->qh_state == QH_STATE_LINKED) {
			++count;
			if (qh->unlink_cycle != ehci->async_unlink_cycle)
				qh_to_unlink = qh;
		}
	}

	/* If nothing else is being unlinked, unlink the last empty QH */
	if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
		start_unlink_async(ehci, qh_to_unlink);
		--count;
	}

	/* Other QHs will be handled later */
	if (count > 0) {
		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
		++ehci->async_unlink_cycle;
	}
}

/* The root hub is suspended; unlink all the async QHs */
static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;

	while (ehci->async->qh_next.qh) {
		qh = ehci->async->qh_next.qh;
		WARN_ON(!list_empty(&qh->qtd_list));
		single_unlink_async(ehci, qh);
	}
	start_iaa_cycle(ehci);
}

/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* If the QH isn't linked then there's nothing we can do. */
	if (qh->qh_state != QH_STATE_LINKED)
		return;

	single_unlink_async(ehci, qh);
	start_iaa_cycle(ehci);
}

/*-------------------------------------------------------------------------*/

static void scan_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	bool			check_unlinks_later = false;

	ehci->qh_scan_next = ehci->async->qh_next.qh;
	while (ehci->qh_scan_next) {
		qh = ehci->qh_scan_next;
		ehci->qh_scan_next = qh->qh_next.qh;

		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in single_unlink_async().
			 */
			temp = qh_completions(ehci, qh);
			if (unlikely(temp)) {
				start_unlink_async(ehci, qh);
			} else if (list_empty(&qh->qtd_list)
					&& qh->qh_state == QH_STATE_LINKED) {
				qh->unlink_cycle = ehci->async_unlink_cycle;
				check_unlinks_later = true;
			}
		}
	}

	/*
	 * Unlink empty entries, reducing DMA usage as well
	 * as HCD schedule-scanning costs.  Delay for any qh
	 * we just scanned, there's a not-unusual case that it
	 * doesn't stay idle for long.
	 */
	if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
			!(ehci->enabled_hrtimer_events &
				BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
		ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
		++ehci->async_unlink_cycle;
	}
}