Commit | Line | Data |
---|---|---|
66d4eadd SS |
1 | /* |
2 | * xHCI host controller driver | |
3 | * | |
4 | * Copyright (C) 2008 Intel Corp. | |
5 | * | |
6 | * Author: Sarah Sharp | |
7 | * Some code borrowed from the Linux EHCI driver. | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
15 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 | * for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software Foundation, | |
20 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
21 | */ | |
22 | ||
23 | #include <linux/irq.h> | |
8df75f42 | 24 | #include <linux/log2.h> |
66d4eadd | 25 | #include <linux/module.h> |
b0567b3f | 26 | #include <linux/moduleparam.h> |
5a0e3ad6 | 27 | #include <linux/slab.h> |
66d4eadd SS |
28 | |
29 | #include "xhci.h" | |
30 | ||
31 | #define DRIVER_AUTHOR "Sarah Sharp" | |
32 | #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" | |
33 | ||
b0567b3f SS |
34 | /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ |
35 | static int link_quirk; | |
36 | module_param(link_quirk, int, S_IRUGO | S_IWUSR); | |
37 | MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); | |
38 | ||
66d4eadd SS |
39 | /* TODO: copied from ehci-hcd.c - can this be refactored? */ |
40 | /* | |
41 | * handshake - spin reading hc until handshake completes or fails | |
42 | * @ptr: address of hc register to be read | |
43 | * @mask: bits to look at in result of read | |
44 | * @done: value of those bits when handshake succeeds | |
45 | * @usec: timeout in microseconds | |
46 | * | |
47 | * Returns negative errno, or zero on success | |
48 | * | |
49 | * Success happens when the "mask" bits have the specified value (hardware | |
50 | * handshake done). There are two failure modes: "usec" microseconds have passed (major |
51 | * hardware flakeout), or the register reads as all-ones (hardware removed). | |
52 | */ | |
53 | static int handshake(struct xhci_hcd *xhci, void __iomem *ptr, | |
54 | u32 mask, u32 done, int usec) | |
55 | { | |
56 | u32 result; | |
57 | ||
58 | do { | |
59 | result = xhci_readl(xhci, ptr); | |
60 | if (result == ~(u32)0) /* card removed */ | |
61 | return -ENODEV; | |
62 | result &= mask; | |
63 | if (result == done) | |
64 | return 0; | |
65 | udelay(1); | |
66 | usec--; | |
67 | } while (usec > 0); | |
68 | return -ETIMEDOUT; | |
69 | } | |
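/* For example, xhci_halt() below uses this to wait (for up to
 * XHCI_MAX_HALT_USEC microseconds) for the HCHalted status bit to be set:
 *
 *	handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */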
70 | ||
71 | /* | |
4f0f0bae | 72 | * Disable interrupts and begin the xHCI halting process. |
66d4eadd | 73 | */ |
4f0f0bae | 74 | void xhci_quiesce(struct xhci_hcd *xhci) |
66d4eadd SS |
75 | { |
76 | u32 halted; | |
77 | u32 cmd; | |
78 | u32 mask; | |
79 | ||
66d4eadd SS |
80 | mask = ~(XHCI_IRQS); |
81 | halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT; | |
82 | if (!halted) | |
83 | mask &= ~CMD_RUN; | |
84 | ||
85 | cmd = xhci_readl(xhci, &xhci->op_regs->command); | |
86 | cmd &= mask; | |
87 | xhci_writel(xhci, cmd, &xhci->op_regs->command); | |
4f0f0bae SS |
88 | } |
89 | ||
90 | /* | |
91 | * Force HC into halt state. | |
92 | * | |
93 | * Disable any IRQs and clear the run/stop bit. | |
94 | * HC will complete any current and actively pipelined transactions, and | |
95 | * should halt within 16 ms of the run/stop bit being cleared. |
96 | * Read HC Halted bit in the status register to see when the HC is finished. | |
97 | * XXX: shouldn't we set HC_STATE_HALT here somewhere? | |
98 | */ | |
99 | int xhci_halt(struct xhci_hcd *xhci) | |
100 | { | |
101 | xhci_dbg(xhci, "// Halt the HC\n"); | |
102 | xhci_quiesce(xhci); | |
66d4eadd SS |
103 | |
104 | return handshake(xhci, &xhci->op_regs->status, | |
105 | STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); | |
106 | } | |
107 | ||
108 | /* | |
109 | * Reset a halted HC, and set the internal HC state to HC_STATE_HALT. | |
110 | * | |
111 | * This resets pipelines, timers, counters, state machines, etc. | |
112 | * Transactions will be terminated immediately, and operational registers | |
113 | * will be set to their defaults. | |
114 | */ | |
115 | int xhci_reset(struct xhci_hcd *xhci) | |
116 | { | |
117 | u32 command; | |
118 | u32 state; | |
119 | ||
120 | state = xhci_readl(xhci, &xhci->op_regs->status); | |
d3512f63 SS |
121 | if ((state & STS_HALT) == 0) { |
122 | xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); | |
123 | return 0; | |
124 | } | |
66d4eadd SS |
125 | |
126 | xhci_dbg(xhci, "// Reset the HC\n"); | |
127 | command = xhci_readl(xhci, &xhci->op_regs->command); | |
128 | command |= CMD_RESET; | |
129 | xhci_writel(xhci, command, &xhci->op_regs->command); | |
130 | /* XXX: Why does EHCI set this here? Shouldn't other code do this? */ | |
131 | xhci_to_hcd(xhci)->state = HC_STATE_HALT; | |
132 | ||
133 | return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000); | |
134 | } | |
135 | ||
66d4eadd SS |
136 | |
137 | #if 0 | |
138 | /* Set up MSI-X table for entry 0 (may claim other entries later) */ | |
139 | static int xhci_setup_msix(struct xhci_hcd *xhci) | |
140 | { | |
141 | int ret; | |
142 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | |
143 | ||
144 | xhci->msix_count = 0; | |
145 | /* XXX: did I do this right? ixgbe does kcalloc for more than one */ | |
146 | xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL); | |
147 | if (!xhci->msix_entries) { | |
148 | xhci_err(xhci, "Failed to allocate MSI-X entries\n"); | |
149 | return -ENOMEM; | |
150 | } | |
151 | xhci->msix_entries[0].entry = 0; | |
152 | ||
153 | ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count); | |
154 | if (ret) { | |
155 | xhci_err(xhci, "Failed to enable MSI-X\n"); | |
156 | goto free_entries; | |
157 | } | |
158 | ||
159 | /* | |
160 | * Pass the xhci pointer value as the request_irq "cookie". | |
161 | * If more irqs are added, this will need to be unique for each one. | |
162 | */ | |
163 | ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0, | |
164 | "xHCI", xhci_to_hcd(xhci)); | |
165 | if (ret) { | |
166 | xhci_err(xhci, "Failed to allocate MSI-X interrupt\n"); | |
167 | goto disable_msix; | |
168 | } | |
169 | xhci_dbg(xhci, "Finished setting up MSI-X\n"); | |
170 | return 0; | |
171 | ||
172 | disable_msix: | |
173 | pci_disable_msix(pdev); | |
174 | free_entries: | |
175 | kfree(xhci->msix_entries); | |
176 | xhci->msix_entries = NULL; | |
177 | return ret; | |
178 | } | |
179 | ||
180 | /* XXX: code duplication; can xhci_setup_msix call this? */ | |
181 | /* Free any IRQs and disable MSI-X */ | |
182 | static void xhci_cleanup_msix(struct xhci_hcd *xhci) | |
183 | { | |
184 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | |
185 | if (!xhci->msix_entries) | |
186 | return; | |
187 | ||
188 | free_irq(xhci->msix_entries[0].vector, xhci); | |
189 | pci_disable_msix(pdev); | |
190 | kfree(xhci->msix_entries); | |
191 | xhci->msix_entries = NULL; | |
192 | xhci_dbg(xhci, "Finished cleaning up MSI-X\n"); | |
193 | } | |
194 | #endif | |
195 | ||
196 | /* | |
197 | * Initialize memory for HCD and xHC (one-time init). | |
198 | * | |
199 | * Program the PAGESIZE register, initialize the device context array, create | |
200 | * device contexts (?), set up a command ring segment (or two?), create event | |
201 | * ring (one for now). | |
202 | */ | |
203 | int xhci_init(struct usb_hcd *hcd) | |
204 | { | |
205 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
206 | int retval = 0; | |
207 | ||
208 | xhci_dbg(xhci, "xhci_init\n"); | |
209 | spin_lock_init(&xhci->lock); | |
b0567b3f SS |
210 | if (link_quirk) { |
211 | xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n"); | |
212 | xhci->quirks |= XHCI_LINK_TRB_QUIRK; | |
213 | } else { | |
ac9d8fe7 | 214 | xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n"); |
b0567b3f | 215 | } |
66d4eadd SS |
216 | retval = xhci_mem_init(xhci, GFP_KERNEL); |
217 | xhci_dbg(xhci, "Finished xhci_init\n"); | |
218 | ||
219 | return retval; | |
220 | } | |
221 | ||
7f84eef0 SS |
222 | /* |
223 | * Called in interrupt context when there might be work | |
224 | * queued on the event ring | |
225 | * | |
226 | * xhci->lock must be held by caller. | |
227 | */ | |
228 | static void xhci_work(struct xhci_hcd *xhci) | |
229 | { | |
230 | u32 temp; | |
8e595a5d | 231 | u64 temp_64; |
7f84eef0 SS |
232 | |
233 | /* | |
234 | * Clear the op reg interrupt status first, | |
235 | * so we can receive interrupts from other MSI-X interrupters. | |
236 | * Write 1 to clear the interrupt status. | |
237 | */ | |
238 | temp = xhci_readl(xhci, &xhci->op_regs->status); | |
239 | temp |= STS_EINT; | |
240 | xhci_writel(xhci, temp, &xhci->op_regs->status); | |
241 | /* FIXME when MSI-X is supported and there are multiple vectors */ | |
242 | /* Clear the MSI-X event interrupt status */ | |
243 | ||
244 | /* Acknowledge the interrupt */ | |
245 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); | |
246 | temp |= 0x3; | |
247 | xhci_writel(xhci, temp, &xhci->ir_set->irq_pending); | |
248 | /* Flush posted writes */ | |
249 | xhci_readl(xhci, &xhci->ir_set->irq_pending); | |
250 | ||
6f5165cf SS |
251 | if (xhci->xhc_state & XHCI_STATE_DYING) |
252 | xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " | |
253 | "Shouldn't IRQs be disabled?\n"); | |
254 | else | |
255 | /* FIXME this should be a delayed service routine | |
256 | * that clears the EHB. | |
257 | */ | |
258 | xhci_handle_event(xhci); | |
7f84eef0 | 259 | |
2d83109b | 260 | /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ |
8e595a5d | 261 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
2d83109b | 262 | xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue); |
7f84eef0 SS |
263 | /* Flush posted writes -- FIXME is this necessary? */ |
264 | xhci_readl(xhci, &xhci->ir_set->irq_pending); | |
265 | } | |
266 | ||
267 | /*-------------------------------------------------------------------------*/ | |
268 | ||
269 | /* | |
270 | * xHCI spec says we can get an interrupt, and if the HC has an error condition, | |
271 | * we might get bad data out of the event ring. Section 4.10.2.7 has a list of | |
272 | * indicators of an event TRB error, but we check the status *first* to be safe. | |
273 | */ | |
274 | irqreturn_t xhci_irq(struct usb_hcd *hcd) | |
275 | { | |
276 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
277 | u32 temp, temp2; | |
66e49d87 | 278 | union xhci_trb *trb; |
7f84eef0 SS |
279 | |
280 | spin_lock(&xhci->lock); | |
66e49d87 | 281 | trb = xhci->event_ring->dequeue; |
7f84eef0 SS |
282 | /* Check if the xHC generated the interrupt, or the irq is shared */ |
283 | temp = xhci_readl(xhci, &xhci->op_regs->status); | |
284 | temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending); | |
fcf8f576 SS |
285 | if (temp == 0xffffffff && temp2 == 0xffffffff) |
286 | goto hw_died; | |
287 | ||
7f84eef0 SS |
288 | if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) { |
289 | spin_unlock(&xhci->lock); | |
290 | return IRQ_NONE; | |
291 | } | |
66e49d87 SS |
292 | xhci_dbg(xhci, "op reg status = %08x\n", temp); |
293 | xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2); | |
294 | xhci_dbg(xhci, "Event ring dequeue ptr:\n"); | |
295 | xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", | |
296 | (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), | |
297 | lower_32_bits(trb->link.segment_ptr), | |
298 | upper_32_bits(trb->link.segment_ptr), | |
299 | (unsigned int) trb->link.intr_target, | |
300 | (unsigned int) trb->link.control); | |
7f84eef0 | 301 | |
7f84eef0 SS |
302 | if (temp & STS_FATAL) { |
303 | xhci_warn(xhci, "WARNING: Host System Error\n"); | |
304 | xhci_halt(xhci); | |
fcf8f576 | 305 | hw_died: |
7f84eef0 | 306 | xhci_to_hcd(xhci)->state = HC_STATE_HALT; |
c96a2b81 | 307 | spin_unlock(&xhci->lock); |
7f84eef0 SS |
308 | return -ESHUTDOWN; |
309 | } | |
310 | ||
311 | xhci_work(xhci); | |
312 | spin_unlock(&xhci->lock); | |
313 | ||
314 | return IRQ_HANDLED; | |
315 | } | |
316 | ||
317 | #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING | |
23e3be11 | 318 | void xhci_event_ring_work(unsigned long arg) |
7f84eef0 SS |
319 | { |
320 | unsigned long flags; | |
321 | int temp; | |
8e595a5d | 322 | u64 temp_64; |
7f84eef0 SS |
323 | struct xhci_hcd *xhci = (struct xhci_hcd *) arg; |
324 | int i, j; | |
325 | ||
326 | xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies); | |
327 | ||
328 | spin_lock_irqsave(&xhci->lock, flags); | |
329 | temp = xhci_readl(xhci, &xhci->op_regs->status); | |
330 | xhci_dbg(xhci, "op reg status = 0x%x\n", temp); | |
6f5165cf | 331 | if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { |
e4ab05df SS |
332 | xhci_dbg(xhci, "HW died, polling stopped.\n"); |
333 | spin_unlock_irqrestore(&xhci->lock, flags); | |
334 | return; | |
335 | } | |
336 | ||
7f84eef0 SS |
337 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); |
338 | xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp); | |
339 | xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled); | |
340 | xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask); | |
341 | xhci->error_bitmask = 0; | |
342 | xhci_dbg(xhci, "Event ring:\n"); | |
343 | xhci_debug_segment(xhci, xhci->event_ring->deq_seg); | |
344 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); | |
8e595a5d SS |
345 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
346 | temp_64 &= ~ERST_PTR_MASK; | |
347 | xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); | |
7f84eef0 SS |
348 | xhci_dbg(xhci, "Command ring:\n"); |
349 | xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); | |
350 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); | |
351 | xhci_dbg_cmd_ptrs(xhci); | |
3ffbba95 | 352 | for (i = 0; i < MAX_HC_SLOTS; ++i) { |
63a0d9ab SS |
353 | if (!xhci->devs[i]) |
354 | continue; | |
355 | for (j = 0; j < 31; ++j) { | |
e9df17eb | 356 | xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]); |
3ffbba95 SS |
357 | } |
358 | } | |
7f84eef0 SS |
359 | |
360 | if (xhci->noops_submitted != NUM_TEST_NOOPS) | |
23e3be11 SS |
361 | if (xhci_setup_one_noop(xhci)) |
362 | xhci_ring_cmd_db(xhci); | |
7f84eef0 SS |
363 | spin_unlock_irqrestore(&xhci->lock, flags); |
364 | ||
365 | if (!xhci->zombie) | |
366 | mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ); | |
367 | else | |
368 | xhci_dbg(xhci, "Quit polling the event ring.\n"); | |
369 | } | |
370 | #endif | |
371 | ||
66d4eadd SS |
372 | /* |
373 | * Start the HC after it was halted. | |
374 | * | |
375 | * This function is called by the USB core when the HC driver is added. | |
376 | * Its opposite is xhci_stop(). | |
377 | * | |
378 | * xhci_init() must be called once before this function can be called. | |
379 | * Reset the HC, enable device slot contexts, program DCBAAP, and | |
380 | * set command ring pointer and event ring pointer. | |
381 | * | |
382 | * Set up MSI-X vectors and enable interrupts. |
383 | */ | |
384 | int xhci_run(struct usb_hcd *hcd) | |
385 | { | |
386 | u32 temp; | |
8e595a5d | 387 | u64 temp_64; |
66d4eadd | 388 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
7f84eef0 | 389 | void (*doorbell)(struct xhci_hcd *) = NULL; |
66d4eadd | 390 | |
0f2a7930 SS |
391 | hcd->uses_new_polling = 1; |
392 | hcd->poll_rh = 0; | |
393 | ||
7f84eef0 | 394 | xhci_dbg(xhci, "xhci_run\n"); |
66d4eadd SS |
395 | #if 0 /* FIXME: MSI not setup yet */ |
396 | /* Do this at the very last minute */ | |
397 | ret = xhci_setup_msix(xhci); | |
398 | if (!ret) | |
399 | return ret; | |
400 | ||
401 | return -ENOSYS; | |
402 | #endif | |
7f84eef0 SS |
403 | #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING |
404 | init_timer(&xhci->event_ring_timer); | |
405 | xhci->event_ring_timer.data = (unsigned long) xhci; | |
23e3be11 | 406 | xhci->event_ring_timer.function = xhci_event_ring_work; |
7f84eef0 SS |
407 | /* Poll the event ring */ |
408 | xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ; | |
409 | xhci->zombie = 0; | |
410 | xhci_dbg(xhci, "Setting event ring polling timer\n"); | |
411 | add_timer(&xhci->event_ring_timer); | |
412 | #endif | |
413 | ||
66e49d87 SS |
414 | xhci_dbg(xhci, "Command ring memory map follows:\n"); |
415 | xhci_debug_ring(xhci, xhci->cmd_ring); | |
416 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); | |
417 | xhci_dbg_cmd_ptrs(xhci); | |
418 | ||
419 | xhci_dbg(xhci, "ERST memory map follows:\n"); | |
420 | xhci_dbg_erst(xhci, &xhci->erst); | |
421 | xhci_dbg(xhci, "Event ring:\n"); | |
422 | xhci_debug_ring(xhci, xhci->event_ring); | |
423 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); | |
424 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); | |
425 | temp_64 &= ~ERST_PTR_MASK; | |
426 | xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); | |
427 | ||
66d4eadd SS |
428 | xhci_dbg(xhci, "// Set the interrupt modulation register\n"); |
429 | temp = xhci_readl(xhci, &xhci->ir_set->irq_control); | |
a4d88302 | 430 | temp &= ~ER_IRQ_INTERVAL_MASK; |
66d4eadd SS |
431 | temp |= (u32) 160; |
432 | xhci_writel(xhci, temp, &xhci->ir_set->irq_control); | |
433 | ||
434 | /* Set the HCD state before we enable the irqs */ | |
435 | hcd->state = HC_STATE_RUNNING; | |
436 | temp = xhci_readl(xhci, &xhci->op_regs->command); | |
437 | temp |= (CMD_EIE); | |
438 | xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n", | |
439 | temp); | |
440 | xhci_writel(xhci, temp, &xhci->op_regs->command); | |
441 | ||
442 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); | |
700e2052 GKH |
443 | xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n", |
444 | xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); | |
66d4eadd SS |
445 | xhci_writel(xhci, ER_IRQ_ENABLE(temp), |
446 | &xhci->ir_set->irq_pending); | |
447 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | |
448 | ||
7f84eef0 | 449 | if (NUM_TEST_NOOPS > 0) |
23e3be11 | 450 | doorbell = xhci_setup_one_noop(xhci); |
7f84eef0 | 451 | |
66d4eadd SS |
452 | temp = xhci_readl(xhci, &xhci->op_regs->command); |
453 | temp |= (CMD_RUN); | |
454 | xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", | |
455 | temp); | |
456 | xhci_writel(xhci, temp, &xhci->op_regs->command); | |
457 | /* Flush PCI posted writes */ | |
458 | temp = xhci_readl(xhci, &xhci->op_regs->command); | |
700e2052 | 459 | xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp); |
7f84eef0 SS |
460 | if (doorbell) |
461 | (*doorbell)(xhci); | |
66d4eadd SS |
462 | |
463 | xhci_dbg(xhci, "Finished xhci_run\n"); | |
464 | return 0; | |
465 | } | |
466 | ||
467 | /* | |
468 | * Stop xHCI driver. | |
469 | * | |
470 | * This function is called by the USB core when the HC driver is removed. | |
471 | * Its opposite is xhci_run(). | |
472 | * | |
473 | * Disable device contexts, disable IRQs, and quiesce the HC. | |
474 | * Reset the HC, finish any completed transactions, and clean up memory. |
475 | */ | |
476 | void xhci_stop(struct usb_hcd *hcd) | |
477 | { | |
478 | u32 temp; | |
479 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
480 | ||
481 | spin_lock_irq(&xhci->lock); | |
66d4eadd SS |
482 | xhci_halt(xhci); |
483 | xhci_reset(xhci); | |
484 | spin_unlock_irq(&xhci->lock); | |
485 | ||
486 | #if 0 /* No MSI yet */ | |
487 | xhci_cleanup_msix(xhci); | |
488 | #endif | |
7f84eef0 SS |
489 | #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING |
490 | /* Tell the event ring poll function not to reschedule */ | |
491 | xhci->zombie = 1; | |
492 | del_timer_sync(&xhci->event_ring_timer); | |
493 | #endif | |
494 | ||
66d4eadd SS |
495 | xhci_dbg(xhci, "// Disabling event ring interrupts\n"); |
496 | temp = xhci_readl(xhci, &xhci->op_regs->status); | |
497 | xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); | |
498 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); | |
499 | xhci_writel(xhci, ER_IRQ_DISABLE(temp), | |
500 | &xhci->ir_set->irq_pending); | |
501 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | |
502 | ||
503 | xhci_dbg(xhci, "cleaning up memory\n"); | |
504 | xhci_mem_cleanup(xhci); | |
505 | xhci_dbg(xhci, "xhci_stop completed - status = %x\n", | |
506 | xhci_readl(xhci, &xhci->op_regs->status)); | |
507 | } | |
508 | ||
509 | /* | |
510 | * Shutdown HC (not bus-specific) | |
511 | * | |
512 | * This is called when the machine is rebooting or halting. We assume that the | |
513 | * machine will be powered off, and the HC's internal state will be reset. | |
514 | * Don't bother to free memory. | |
515 | */ | |
516 | void xhci_shutdown(struct usb_hcd *hcd) | |
517 | { | |
518 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
519 | ||
520 | spin_lock_irq(&xhci->lock); | |
521 | xhci_halt(xhci); | |
522 | spin_unlock_irq(&xhci->lock); | |
523 | ||
524 | #if 0 | |
525 | xhci_cleanup_msix(xhci); | |
526 | #endif | |
527 | ||
528 | xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n", | |
529 | xhci_readl(xhci, &xhci->op_regs->status)); | |
530 | } | |
531 | ||
7f84eef0 SS |
532 | /*-------------------------------------------------------------------------*/ |
533 | ||
d0e96f5a SS |
534 | /** |
535 | * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and | |
536 | * HCDs. Find the index for an endpoint given its descriptor. Use the return | |
537 | * value to left shift 1 to create the bitmask. |
538 | * | |
539 | * Index = (epnum * 2) + direction - 1, | |
540 | * where direction = 0 for OUT, 1 for IN. | |
541 | * For control endpoints, the IN index is used (OUT index is unused), so | |
542 | * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2) | |
543 | */ | |
544 | unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc) | |
545 | { | |
546 | unsigned int index; | |
547 | if (usb_endpoint_xfer_control(desc)) | |
548 | index = (unsigned int) (usb_endpoint_num(desc)*2); | |
549 | else | |
550 | index = (unsigned int) (usb_endpoint_num(desc)*2) + | |
551 | (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; | |
552 | return index; | |
553 | } | |
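/* Worked examples of the index calculation documented above:
 *	ep 1 OUT (bEndpointAddress 0x01): (1 * 2) + 0 - 1 = 1
 *	ep 1 IN  (bEndpointAddress 0x81): (1 * 2) + 1 - 1 = 2
 *	ep 0 (default control endpoint):  (0 * 2)         = 0
 */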
554 | ||
f94e0186 SS |
555 | /* Find the flag for this endpoint (for use in the control context). Use the |
556 | * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is | |
557 | * bit 1, etc. | |
558 | */ | |
559 | unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) | |
560 | { | |
561 | return 1 << (xhci_get_endpoint_index(desc) + 1); | |
562 | } | |
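/* For example, ep 1 IN has endpoint index 2, so its flag is 1 << (2 + 1) = 0x8;
 * the slot context flag is 0x1 and the default control endpoint flag is 0x2.
 */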
563 | ||
ac9d8fe7 SS |
564 | /* Find the flag for this endpoint (for use in the control context). Use the |
565 | * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is | |
566 | * bit 1, etc. | |
567 | */ | |
568 | unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index) | |
569 | { | |
570 | return 1 << (ep_index + 1); | |
571 | } | |
572 | ||
f94e0186 SS |
573 | /* Compute the last valid endpoint context index. Basically, this is the |
574 | * endpoint index plus one. For slot contexts with more than one valid endpoint, |
575 | * we find the most significant bit set in the added contexts flags. | |
576 | * e.g. ep 1 IN (endpoint address 0x81) => added_ctxs = 0b1000 |
577 | * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one. | |
578 | */ | |
ac9d8fe7 | 579 | unsigned int xhci_last_valid_endpoint(u32 added_ctxs) |
f94e0186 SS |
580 | { |
581 | return fls(added_ctxs) - 1; | |
582 | } | |
583 | ||
d0e96f5a SS |
584 | /* Returns 1 if the arguments are OK; |
585 | * returns 0 if this is a root hub; returns -EINVAL for NULL pointers. |
586 | */ | |
587 | int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, | |
588 | struct usb_host_endpoint *ep, int check_ep, const char *func) { | |
589 | if (!hcd || (check_ep && !ep) || !udev) { | |
590 | printk(KERN_DEBUG "xHCI %s called with invalid args\n", | |
591 | func); | |
592 | return -EINVAL; | |
593 | } | |
594 | if (!udev->parent) { | |
595 | printk(KERN_DEBUG "xHCI %s called for root hub\n", | |
596 | func); | |
597 | return 0; | |
598 | } | |
599 | if (!udev->slot_id) { | |
600 | printk(KERN_DEBUG "xHCI %s called with unaddressed device\n", | |
601 | func); | |
602 | return -EINVAL; | |
603 | } | |
604 | return 1; | |
605 | } | |
606 | ||
2d3f1fac | 607 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, |
913a8a34 SS |
608 | struct usb_device *udev, struct xhci_command *command, |
609 | bool ctx_change, bool must_succeed); | |
2d3f1fac SS |
610 | |
611 | /* | |
612 | * Full speed devices may have a max packet size greater than 8 bytes, but the | |
613 | * USB core doesn't know that until it reads the first 8 bytes of the | |
614 | * descriptor. If the usb_device's max packet size changes after that point, | |
615 | * we need to issue an evaluate context command and wait on it. | |
616 | */ | |
617 | static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, | |
618 | unsigned int ep_index, struct urb *urb) | |
619 | { | |
620 | struct xhci_container_ctx *in_ctx; | |
621 | struct xhci_container_ctx *out_ctx; | |
622 | struct xhci_input_control_ctx *ctrl_ctx; | |
623 | struct xhci_ep_ctx *ep_ctx; | |
624 | int max_packet_size; | |
625 | int hw_max_packet_size; | |
626 | int ret = 0; | |
627 | ||
628 | out_ctx = xhci->devs[slot_id]->out_ctx; | |
629 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); | |
630 | hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2); | |
631 | max_packet_size = urb->dev->ep0.desc.wMaxPacketSize; | |
632 | if (hw_max_packet_size != max_packet_size) { | |
633 | xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n"); | |
634 | xhci_dbg(xhci, "Max packet size in usb_device = %d\n", | |
635 | max_packet_size); | |
636 | xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n", | |
637 | hw_max_packet_size); | |
638 | xhci_dbg(xhci, "Issuing evaluate context command.\n"); | |
639 | ||
640 | /* Set up the modified control endpoint 0 */ | |
913a8a34 SS |
641 | xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, |
642 | xhci->devs[slot_id]->out_ctx, ep_index); | |
2d3f1fac SS |
643 | in_ctx = xhci->devs[slot_id]->in_ctx; |
644 | ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); | |
645 | ep_ctx->ep_info2 &= ~MAX_PACKET_MASK; | |
646 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size); | |
647 | ||
648 | /* Set up the input context flags for the command */ | |
649 | /* FIXME: This won't work if a non-default control endpoint | |
650 | * changes max packet sizes. | |
651 | */ | |
652 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | |
653 | ctrl_ctx->add_flags = EP0_FLAG; | |
654 | ctrl_ctx->drop_flags = 0; | |
655 | ||
656 | xhci_dbg(xhci, "Slot %d input context\n", slot_id); | |
657 | xhci_dbg_ctx(xhci, in_ctx, ep_index); | |
658 | xhci_dbg(xhci, "Slot %d output context\n", slot_id); | |
659 | xhci_dbg_ctx(xhci, out_ctx, ep_index); | |
660 | ||
913a8a34 SS |
661 | ret = xhci_configure_endpoint(xhci, urb->dev, NULL, |
662 | true, false); | |
2d3f1fac SS |
663 | |
664 | /* Clean up the input context for later use by bandwidth | |
665 | * functions. | |
666 | */ | |
667 | ctrl_ctx->add_flags = SLOT_FLAG; | |
668 | } | |
669 | return ret; | |
670 | } | |
671 | ||
d0e96f5a SS |
672 | /* |
673 | * non-error returns are a promise to giveback() the urb later | |
674 | * we drop ownership so next owner (or urb unlink) can get it | |
675 | */ | |
676 | int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |
677 | { | |
678 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
679 | unsigned long flags; | |
680 | int ret = 0; | |
681 | unsigned int slot_id, ep_index; | |
682 | ||
2d3f1fac | 683 | |
d0e96f5a SS |
684 | if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0) |
685 | return -EINVAL; | |
686 | ||
687 | slot_id = urb->dev->slot_id; | |
688 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); | |
d0e96f5a | 689 | |
d0e96f5a SS |
690 | if (!xhci->devs || !xhci->devs[slot_id]) { |
691 | if (!in_interrupt()) | |
692 | dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n"); | |
c7959fb2 SS |
693 | ret = -EINVAL; |
694 | goto exit; | |
d0e96f5a SS |
695 | } |
696 | if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) { | |
697 | if (!in_interrupt()) | |
698 | xhci_dbg(xhci, "urb submitted during PCI suspend\n"); | |
699 | ret = -ESHUTDOWN; | |
700 | goto exit; | |
701 | } | |
2d3f1fac SS |
702 | if (usb_endpoint_xfer_control(&urb->ep->desc)) { |
703 | /* Check to see if the max packet size for the default control | |
704 | * endpoint changed during FS device enumeration | |
705 | */ | |
706 | if (urb->dev->speed == USB_SPEED_FULL) { | |
707 | ret = xhci_check_maxpacket(xhci, slot_id, | |
708 | ep_index, urb); | |
709 | if (ret < 0) | |
710 | return ret; | |
711 | } | |
712 | ||
b11069f5 SS |
713 | /* We have a spinlock and interrupts disabled, so we must pass |
714 | * atomic context to this function, which may allocate memory. | |
715 | */ | |
2d3f1fac | 716 | spin_lock_irqsave(&xhci->lock, flags); |
6f5165cf SS |
717 | if (xhci->xhc_state & XHCI_STATE_DYING) |
718 | goto dying; | |
b11069f5 | 719 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, |
23e3be11 | 720 | slot_id, ep_index); |
2d3f1fac SS |
721 | spin_unlock_irqrestore(&xhci->lock, flags); |
722 | } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { | |
723 | spin_lock_irqsave(&xhci->lock, flags); | |
6f5165cf SS |
724 | if (xhci->xhc_state & XHCI_STATE_DYING) |
725 | goto dying; | |
8df75f42 SS |
726 | if (xhci->devs[slot_id]->eps[ep_index].ep_state & |
727 | EP_GETTING_STREAMS) { | |
728 | xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " | |
729 | "is transitioning to using streams.\n"); | |
730 | ret = -EINVAL; | |
731 | } else if (xhci->devs[slot_id]->eps[ep_index].ep_state & | |
732 | EP_GETTING_NO_STREAMS) { | |
733 | xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " | |
734 | "is transitioning to " | |
735 | "not having streams.\n"); | |
736 | ret = -EINVAL; | |
737 | } else { | |
738 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, | |
739 | slot_id, ep_index); | |
740 | } | |
2d3f1fac | 741 | spin_unlock_irqrestore(&xhci->lock, flags); |
624defa1 SS |
742 | } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { |
743 | spin_lock_irqsave(&xhci->lock, flags); | |
6f5165cf SS |
744 | if (xhci->xhc_state & XHCI_STATE_DYING) |
745 | goto dying; | |
624defa1 SS |
746 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, |
747 | slot_id, ep_index); | |
748 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2d3f1fac | 749 | } else { |
b10de142 | 750 | ret = -EINVAL; |
2d3f1fac | 751 | } |
d0e96f5a | 752 | exit: |
d0e96f5a | 753 | return ret; |
6f5165cf SS |
754 | dying: |
755 | xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " | |
756 | "non-responsive xHCI host.\n", | |
757 | urb->ep->desc.bEndpointAddress, urb); | |
758 | spin_unlock_irqrestore(&xhci->lock, flags); | |
759 | return -ESHUTDOWN; | |
d0e96f5a SS |
760 | } |
761 | ||
ae636747 SS |
762 | /* |
763 | * Remove the URB's TD from the endpoint ring. This may cause the HC to stop | |
764 | * USB transfers, potentially stopping in the middle of a TRB buffer. The HC | |
765 | * should pick up where it left off in the TD, unless a Set Transfer Ring | |
766 | * Dequeue Pointer is issued. | |
767 | * | |
768 | * The TRBs that make up the buffers for the canceled URB will be "removed" from | |
769 | * the ring. Since the ring is a contiguous structure, they can't be physically | |
770 | * removed. Instead, there are two options (plus a third case to handle): |
771 | * | |
772 | * 1) If the HC is in the middle of processing the URB to be canceled, we | |
773 | * simply move the ring's dequeue pointer past those TRBs using the Set | |
774 | * Transfer Ring Dequeue Pointer command. This will be the common case, | |
775 | * when drivers time out on the last submitted URB and attempt to cancel. |
776 | * | |
777 | * 2) If the HC is in the middle of a different TD, we turn the TRBs into a | |
778 | * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The | |
779 | * HC will need to invalidate any TRBs it has cached after the stop |
780 | * endpoint command, as noted in the xHCI 0.95 errata. | |
781 | * | |
782 | * 3) The TD may have completed by the time the Stop Endpoint Command | |
783 | * completes, so software needs to handle that case too. | |
784 | * | |
785 | * This function should protect against the TD enqueueing code ringing the | |
786 | * doorbell while this code is waiting for a Stop Endpoint command to complete. | |
787 | * It also needs to account for multiple cancellations happening at the same |
788 | * time for the same endpoint. | |
789 | * | |
790 | * Note that this function can be called in any context, or so says | |
791 | * usb_hcd_unlink_urb() | |
d0e96f5a SS |
792 | */ |
793 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |
794 | { | |
ae636747 SS |
795 | unsigned long flags; |
796 | int ret; | |
e34b2fbf | 797 | u32 temp; |
ae636747 SS |
798 | struct xhci_hcd *xhci; |
799 | struct xhci_td *td; | |
800 | unsigned int ep_index; | |
801 | struct xhci_ring *ep_ring; | |
63a0d9ab | 802 | struct xhci_virt_ep *ep; |
ae636747 SS |
803 | |
804 | xhci = hcd_to_xhci(hcd); | |
805 | spin_lock_irqsave(&xhci->lock, flags); | |
806 | /* Make sure the URB hasn't completed or been unlinked already */ | |
807 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); | |
808 | if (ret || !urb->hcpriv) | |
809 | goto done; | |
e34b2fbf SS |
810 | temp = xhci_readl(xhci, &xhci->op_regs->status); |
811 | if (temp == 0xffffffff) { | |
812 | xhci_dbg(xhci, "HW died, freeing TD.\n"); | |
813 | td = (struct xhci_td *) urb->hcpriv; | |
814 | ||
815 | usb_hcd_unlink_urb_from_ep(hcd, urb); | |
816 | spin_unlock_irqrestore(&xhci->lock, flags); | |
817 | usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN); | |
818 | kfree(td); | |
819 | return ret; | |
820 | } | |
6f5165cf SS |
821 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
822 | xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " | |
823 | "non-responsive xHCI host.\n", | |
824 | urb->ep->desc.bEndpointAddress, urb); | |
825 | /* Let the stop endpoint command watchdog timer (which set this | |
826 | * state) finish cleaning up the endpoint TD lists. We must | |
827 | * have caught it in the middle of dropping a lock and giving | |
828 | * back an URB. | |
829 | */ | |
830 | goto done; | |
831 | } | |
ae636747 | 832 | |
700e2052 | 833 | xhci_dbg(xhci, "Cancel URB %p\n", urb); |
66e49d87 SS |
834 | xhci_dbg(xhci, "Event ring:\n"); |
835 | xhci_debug_ring(xhci, xhci->event_ring); | |
ae636747 | 836 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
63a0d9ab | 837 | ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; |
e9df17eb SS |
838 | ep_ring = xhci_urb_to_transfer_ring(xhci, urb); |
839 | if (!ep_ring) { | |
840 | ret = -EINVAL; | |
841 | goto done; | |
842 | } | |
843 | ||
66e49d87 SS |
844 | xhci_dbg(xhci, "Endpoint ring:\n"); |
845 | xhci_debug_ring(xhci, ep_ring); | |
ae636747 SS |
846 | td = (struct xhci_td *) urb->hcpriv; |
847 | ||
63a0d9ab | 848 | list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); |
ae636747 SS |
849 | /* Queue a stop endpoint command, but only if this is |
850 | * the first cancellation to be handled. | |
851 | */ | |
678539cf SS |
852 | if (!(ep->ep_state & EP_HALT_PENDING)) { |
853 | ep->ep_state |= EP_HALT_PENDING; | |
6f5165cf SS |
854 | ep->stop_cmds_pending++; |
855 | ep->stop_cmd_timer.expires = jiffies + | |
856 | XHCI_STOP_EP_CMD_TIMEOUT * HZ; | |
857 | add_timer(&ep->stop_cmd_timer); | |
23e3be11 SS |
858 | xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); |
859 | xhci_ring_cmd_db(xhci); | |
ae636747 SS |
860 | } |
861 | done: | |
862 | spin_unlock_irqrestore(&xhci->lock, flags); | |
863 | return ret; | |
d0e96f5a SS |
864 | } |
865 | ||
f94e0186 SS |
866 | /* Drop an endpoint from a new bandwidth configuration for this device. |
867 | * Only one call to this function is allowed per endpoint before | |
868 | * check_bandwidth() or reset_bandwidth() must be called. | |
869 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will | |
870 | * add the endpoint to the schedule with possibly new parameters denoted by a | |
871 | * different endpoint descriptor in usb_host_endpoint. | |
872 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is | |
873 | * not allowed. | |
f88ba78d SS |
874 | * |
875 | * The USB core will not allow URBs to be queued to an endpoint that is being | |
876 | * disabled, so there's no need for mutual exclusion to protect | |
877 | * the xhci->devs[slot_id] structure. | |
f94e0186 SS |
878 | */ |
879 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |
880 | struct usb_host_endpoint *ep) | |
881 | { | |
f94e0186 | 882 | struct xhci_hcd *xhci; |
d115b048 JY |
883 | struct xhci_container_ctx *in_ctx, *out_ctx; |
884 | struct xhci_input_control_ctx *ctrl_ctx; | |
885 | struct xhci_slot_ctx *slot_ctx; | |
f94e0186 SS |
886 | unsigned int last_ctx; |
887 | unsigned int ep_index; | |
888 | struct xhci_ep_ctx *ep_ctx; | |
889 | u32 drop_flag; | |
890 | u32 new_add_flags, new_drop_flags, new_slot_info; | |
891 | int ret; | |
892 | ||
893 | ret = xhci_check_args(hcd, udev, ep, 1, __func__); | |
f94e0186 SS |
894 | if (ret <= 0) |
895 | return ret; | |
896 | xhci = hcd_to_xhci(hcd); | |
700e2052 | 897 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
f94e0186 SS |
898 | |
899 | drop_flag = xhci_get_endpoint_flag(&ep->desc); | |
900 | if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { | |
901 | xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", | |
902 | __func__, drop_flag); | |
903 | return 0; | |
904 | } | |
905 | ||
f94e0186 SS |
906 | if (!xhci->devs || !xhci->devs[udev->slot_id]) { |
907 | xhci_warn(xhci, "xHCI %s called with unaddressed device\n", | |
908 | __func__); | |
f94e0186 SS |
909 | return -EINVAL; |
910 | } | |
911 | ||
912 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; | |
d115b048 JY |
913 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; |
914 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | |
f94e0186 | 915 | ep_index = xhci_get_endpoint_index(&ep->desc); |
d115b048 | 916 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
f94e0186 SS |
917 | /* If the HC already knows the endpoint is disabled, |
918 | * or the HCD has noted it is disabled, ignore this request | |
919 | */ | |
920 | if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || | |
d115b048 | 921 | ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { |
700e2052 GKH |
922 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", |
923 | __func__, ep); | |
f94e0186 SS |
924 | return 0; |
925 | } | |
926 | ||
d115b048 JY |
927 | ctrl_ctx->drop_flags |= drop_flag; |
928 | new_drop_flags = ctrl_ctx->drop_flags; | |
f94e0186 | 929 | |
0a023c6c | 930 | ctrl_ctx->add_flags &= ~drop_flag; |
d115b048 | 931 | new_add_flags = ctrl_ctx->add_flags; |
f94e0186 | 932 | |
d115b048 JY |
933 | last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags); |
934 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); | |
f94e0186 | 935 | /* Update the last valid endpoint context, if we deleted the last one */ |
d115b048 JY |
936 | if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { |
937 | slot_ctx->dev_info &= ~LAST_CTX_MASK; | |
938 | slot_ctx->dev_info |= LAST_CTX(last_ctx); | |
f94e0186 | 939 | } |
d115b048 | 940 | new_slot_info = slot_ctx->dev_info; |
f94e0186 SS |
941 | |
942 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); | |
943 | ||
f94e0186 SS |
944 | xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", |
945 | (unsigned int) ep->desc.bEndpointAddress, | |
946 | udev->slot_id, | |
947 | (unsigned int) new_drop_flags, | |
948 | (unsigned int) new_add_flags, | |
949 | (unsigned int) new_slot_info); | |
950 | return 0; | |
951 | } | |
952 | ||
953 | /* Add an endpoint to a new possible bandwidth configuration for this device. | |
954 | * Only one call to this function is allowed per endpoint before | |
955 | * check_bandwidth() or reset_bandwidth() must be called. | |
956 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will | |
957 | * add the endpoint to the schedule with possibly new parameters denoted by a | |
958 | * different endpoint descriptor in usb_host_endpoint. | |
959 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is | |
960 | * not allowed. | |
f88ba78d SS |
961 | * |
962 | * The USB core will not allow URBs to be queued to an endpoint until the | |
963 | * configuration or alt setting is installed in the device, so there's no need | |
964 | * for mutual exclusion to protect the xhci->devs[slot_id] structure. | |
f94e0186 SS |
965 | */ |
966 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |
967 | struct usb_host_endpoint *ep) | |
968 | { | |
f94e0186 | 969 | struct xhci_hcd *xhci; |
d115b048 | 970 | struct xhci_container_ctx *in_ctx, *out_ctx; |
f94e0186 SS |
971 | unsigned int ep_index; |
972 | struct xhci_ep_ctx *ep_ctx; | |
d115b048 JY |
973 | struct xhci_slot_ctx *slot_ctx; |
974 | struct xhci_input_control_ctx *ctrl_ctx; | |
f94e0186 SS |
975 | u32 added_ctxs; |
976 | unsigned int last_ctx; | |
977 | u32 new_add_flags, new_drop_flags, new_slot_info; | |
978 | int ret = 0; | |
979 | ||
980 | ret = xhci_check_args(hcd, udev, ep, 1, __func__); | |
a1587d97 SS |
981 | if (ret <= 0) { |
982 | /* So we won't queue a reset ep command for a root hub */ | |
983 | ep->hcpriv = NULL; | |
f94e0186 | 984 | return ret; |
a1587d97 | 985 | } |
f94e0186 SS |
986 | xhci = hcd_to_xhci(hcd); |
987 | ||
988 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); | |
989 | last_ctx = xhci_last_valid_endpoint(added_ctxs); | |
990 | if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { | |
991 | /* FIXME when we have to issue an evaluate endpoint command to | |
992 | * deal with ep0 max packet size changing once we get the | |
993 | * descriptors | |
994 | */ | |
995 | xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", | |
996 | __func__, added_ctxs); | |
997 | return 0; | |
998 | } | |
999 | ||
f94e0186 SS |
1000 | if (!xhci->devs || !xhci->devs[udev->slot_id]) { |
1001 | xhci_warn(xhci, "xHCI %s called with unaddressed device\n", | |
1002 | __func__); | |
f94e0186 SS |
1003 | return -EINVAL; |
1004 | } | |
1005 | ||
1006 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; | |
d115b048 JY |
1007 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; |
1008 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | |
f94e0186 | 1009 | ep_index = xhci_get_endpoint_index(&ep->desc); |
d115b048 | 1010 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
f94e0186 SS |
1011 | /* If the HCD has already noted the endpoint is enabled, |
1012 | * ignore this request. | |
1013 | */ | |
d115b048 | 1014 | if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { |
700e2052 GKH |
1015 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", |
1016 | __func__, ep); | |
f94e0186 SS |
1017 | return 0; |
1018 | } | |
1019 | ||
f88ba78d SS |
1020 | /* |
1021 | * Configuration and alternate setting changes must be done in | |
1022 | * process context, not interrupt context (or so documentation |
1023 | * for usb_set_interface() and usb_set_configuration() claim). | |
1024 | */ | |
1025 | if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], | |
319c3ea4 | 1026 | udev, ep, GFP_NOIO) < 0) { |
f94e0186 SS |
1027 | dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", |
1028 | __func__, ep->desc.bEndpointAddress); | |
f94e0186 SS |
1029 | return -ENOMEM; |
1030 | } | |
1031 | ||
d115b048 JY |
1032 | ctrl_ctx->add_flags |= added_ctxs; |
1033 | new_add_flags = ctrl_ctx->add_flags; | |
f94e0186 SS |
1034 | |
1035 | /* If xhci_endpoint_disable() was called for this endpoint, but the | |
1036 | * xHC hasn't been notified yet through the check_bandwidth() call, | |
1037 | * this re-adds a new state for the endpoint from the new endpoint | |
1038 | * descriptors. We must drop and re-add this endpoint, so we leave the | |
1039 | * drop flags alone. | |
1040 | */ | |
d115b048 | 1041 | new_drop_flags = ctrl_ctx->drop_flags; |
f94e0186 | 1042 | |
d115b048 | 1043 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); |
f94e0186 | 1044 | /* Update the last valid endpoint context, if we just added one past */ |
d115b048 JY |
1045 | if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { |
1046 | slot_ctx->dev_info &= ~LAST_CTX_MASK; | |
1047 | slot_ctx->dev_info |= LAST_CTX(last_ctx); | |
f94e0186 | 1048 | } |
d115b048 | 1049 | new_slot_info = slot_ctx->dev_info; |
f94e0186 | 1050 | |
a1587d97 SS |
1051 | /* Store the usb_device pointer for later use */ |
1052 | ep->hcpriv = udev; | |
1053 | ||
f94e0186 SS |
1054 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", |
1055 | (unsigned int) ep->desc.bEndpointAddress, | |
1056 | udev->slot_id, | |
1057 | (unsigned int) new_drop_flags, | |
1058 | (unsigned int) new_add_flags, | |
1059 | (unsigned int) new_slot_info); | |
1060 | return 0; | |
1061 | } | |
1062 | ||
d115b048 | 1063 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
f94e0186 | 1064 | { |
d115b048 | 1065 | struct xhci_input_control_ctx *ctrl_ctx; |
f94e0186 | 1066 | struct xhci_ep_ctx *ep_ctx; |
d115b048 | 1067 | struct xhci_slot_ctx *slot_ctx; |
f94e0186 SS |
1068 | int i; |
1069 | ||
1070 | /* When a device's add flag and drop flag are zero, any subsequent | |
1071 | * configure endpoint command will leave that endpoint's state | |
1072 | * untouched. Make sure we don't leave any old state in the input | |
1073 | * endpoint contexts. | |
1074 | */ | |
d115b048 JY |
1075 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
1076 | ctrl_ctx->drop_flags = 0; | |
1077 | ctrl_ctx->add_flags = 0; | |
1078 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | |
1079 | slot_ctx->dev_info &= ~LAST_CTX_MASK; | |
f94e0186 | 1080 | /* Endpoint 0 is always valid */ |
d115b048 | 1081 | slot_ctx->dev_info |= LAST_CTX(1); |
f94e0186 | 1082 | for (i = 1; i < 31; ++i) { |
d115b048 | 1083 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); |
f94e0186 SS |
1084 | ep_ctx->ep_info = 0; |
1085 | ep_ctx->ep_info2 = 0; | |
8e595a5d | 1086 | ep_ctx->deq = 0; |
f94e0186 SS |
1087 | ep_ctx->tx_info = 0; |
1088 | } | |
1089 | } | |
1090 | ||
f2217e8e | 1091 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, |
913a8a34 | 1092 | struct usb_device *udev, int *cmd_status) |
f2217e8e SS |
1093 | { |
1094 | int ret; | |
1095 | ||
913a8a34 | 1096 | switch (*cmd_status) { |
f2217e8e SS |
1097 | case COMP_ENOMEM: |
1098 | dev_warn(&udev->dev, "Not enough host controller resources " | |
1099 | "for new device state.\n"); | |
1100 | ret = -ENOMEM; | |
1101 | /* FIXME: can we allocate more resources for the HC? */ | |
1102 | break; | |
1103 | case COMP_BW_ERR: | |
1104 | dev_warn(&udev->dev, "Not enough bandwidth " | |
1105 | "for new device state.\n"); | |
1106 | ret = -ENOSPC; | |
1107 | /* FIXME: can we go back to the old state? */ | |
1108 | break; | |
1109 | case COMP_TRB_ERR: | |
1110 | /* the HCD set up something wrong */ | |
1111 | dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " | |
1112 | "add flag = 1, " | |
1113 | "and endpoint is not disabled.\n"); | |
1114 | ret = -EINVAL; | |
1115 | break; | |
1116 | case COMP_SUCCESS: | |
1117 | dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); | |
1118 | ret = 0; | |
1119 | break; | |
1120 | default: | |
1121 | xhci_err(xhci, "ERROR: unexpected command completion " | |
913a8a34 | 1122 | "code 0x%x.\n", *cmd_status); |
f2217e8e SS |
1123 | ret = -EINVAL; |
1124 | break; | |
1125 | } | |
1126 | return ret; | |
1127 | } | |
1128 | ||
1129 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, | |
913a8a34 | 1130 | struct usb_device *udev, int *cmd_status) |
f2217e8e SS |
1131 | { |
1132 | int ret; | |
913a8a34 | 1133 | struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; |
f2217e8e | 1134 | |
913a8a34 | 1135 | switch (*cmd_status) { |
f2217e8e SS |
1136 | case COMP_EINVAL: |
1137 | dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate " | |
1138 | "context command.\n"); | |
1139 | ret = -EINVAL; | |
1140 | break; | |
1141 | case COMP_EBADSLT: | |
1142 | dev_warn(&udev->dev, "WARN: slot not enabled for " |
1143 | "evaluate context command.\n"); | |
1144 | case COMP_CTX_STATE: | |
1145 | dev_warn(&udev->dev, "WARN: invalid context state for " | |
1146 | "evaluate context command.\n"); | |
1147 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); | |
1148 | ret = -EINVAL; | |
1149 | break; | |
1150 | case COMP_SUCCESS: | |
1151 | dev_dbg(&udev->dev, "Successful evaluate context command\n"); | |
1152 | ret = 0; | |
1153 | break; | |
1154 | default: | |
1155 | xhci_err(xhci, "ERROR: unexpected command completion " | |
913a8a34 | 1156 | "code 0x%x.\n", *cmd_status); |
f2217e8e SS |
1157 | ret = -EINVAL; |
1158 | break; | |
1159 | } | |
1160 | return ret; | |
1161 | } | |
1162 | ||
1163 | /* Issue a configure endpoint command or evaluate context command | |
1164 | * and wait for it to finish. | |
1165 | */ | |
1166 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |
913a8a34 SS |
1167 | struct usb_device *udev, |
1168 | struct xhci_command *command, | |
1169 | bool ctx_change, bool must_succeed) | |
f2217e8e SS |
1170 | { |
1171 | int ret; | |
1172 | int timeleft; | |
1173 | unsigned long flags; | |
913a8a34 SS |
1174 | struct xhci_container_ctx *in_ctx; |
1175 | struct completion *cmd_completion; | |
1176 | int *cmd_status; | |
1177 | struct xhci_virt_device *virt_dev; | |
f2217e8e SS |
1178 | |
1179 | spin_lock_irqsave(&xhci->lock, flags); | |
913a8a34 SS |
1180 | virt_dev = xhci->devs[udev->slot_id]; |
1181 | if (command) { | |
1182 | in_ctx = command->in_ctx; | |
1183 | cmd_completion = command->completion; | |
1184 | cmd_status = &command->status; | |
1185 | command->command_trb = xhci->cmd_ring->enqueue; | |
1186 | list_add_tail(&command->cmd_list, &virt_dev->cmd_list); | |
1187 | } else { | |
1188 | in_ctx = virt_dev->in_ctx; | |
1189 | cmd_completion = &virt_dev->cmd_completion; | |
1190 | cmd_status = &virt_dev->cmd_status; | |
1191 | } | |
1d68064a | 1192 | init_completion(cmd_completion); |
913a8a34 | 1193 | |
f2217e8e | 1194 | if (!ctx_change) |
913a8a34 SS |
1195 | ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, |
1196 | udev->slot_id, must_succeed); | |
f2217e8e | 1197 | else |
913a8a34 | 1198 | ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, |
f2217e8e SS |
1199 | udev->slot_id); |
1200 | if (ret < 0) { | |
c01591bd SS |
1201 | if (command) |
1202 | list_del(&command->cmd_list); | |
f2217e8e SS |
1203 | spin_unlock_irqrestore(&xhci->lock, flags); |
1204 | xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); | |
1205 | return -ENOMEM; | |
1206 | } | |
1207 | xhci_ring_cmd_db(xhci); | |
1208 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1209 | ||
1210 | /* Wait for the configure endpoint command to complete */ | |
1211 | timeleft = wait_for_completion_interruptible_timeout( | |
913a8a34 | 1212 | cmd_completion, |
f2217e8e SS |
1213 | USB_CTRL_SET_TIMEOUT); |
1214 | if (timeleft <= 0) { | |
1215 | xhci_warn(xhci, "%s while waiting for %s command\n", | |
1216 | timeleft == 0 ? "Timeout" : "Signal", | |
1217 | ctx_change == 0 ? | |
1218 | "configure endpoint" : | |
1219 | "evaluate context"); | |
1220 | /* FIXME cancel the configure endpoint command */ | |
1221 | return -ETIME; | |
1222 | } | |
1223 | ||
1224 | if (!ctx_change) | |
913a8a34 SS |
1225 | return xhci_configure_endpoint_result(xhci, udev, cmd_status); |
1226 | return xhci_evaluate_context_result(xhci, udev, cmd_status); | |
f2217e8e SS |
1227 | } |
1228 | ||
f88ba78d SS |
1229 | /* Called after one or more calls to xhci_add_endpoint() or |
1230 | * xhci_drop_endpoint(). If this call fails, the USB core is expected | |
1231 | * to call xhci_reset_bandwidth(). | |
1232 | * | |
1233 | * Since we are in the middle of changing either configuration or | |
1234 | * installing a new alt setting, the USB core won't allow URBs to be | |
1235 | * enqueued for any endpoint on the old config or interface. Nothing | |
1236 | * else should be touching the xhci->devs[slot_id] structure, so we | |
1237 | * don't need to take the xhci->lock for manipulating that. | |
1238 | */ | |
f94e0186 SS |
1239 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
1240 | { | |
1241 | int i; | |
1242 | int ret = 0; | |
f94e0186 SS |
1243 | struct xhci_hcd *xhci; |
1244 | struct xhci_virt_device *virt_dev; | |
d115b048 JY |
1245 | struct xhci_input_control_ctx *ctrl_ctx; |
1246 | struct xhci_slot_ctx *slot_ctx; | |
f94e0186 SS |
1247 | |
1248 | ret = xhci_check_args(hcd, udev, NULL, 0, __func__); | |
1249 | if (ret <= 0) | |
1250 | return ret; | |
1251 | xhci = hcd_to_xhci(hcd); | |
1252 | ||
f94e0186 SS |
1253 | if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) { |
1254 | xhci_warn(xhci, "xHCI %s called with unaddressed device\n", | |
1255 | __func__); | |
f94e0186 SS |
1256 | return -EINVAL; |
1257 | } | |
700e2052 | 1258 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
f94e0186 SS |
1259 | virt_dev = xhci->devs[udev->slot_id]; |
1260 | ||
1261 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ | |
d115b048 JY |
1262 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
1263 | ctrl_ctx->add_flags |= SLOT_FLAG; | |
1264 | ctrl_ctx->add_flags &= ~EP0_FLAG; | |
1265 | ctrl_ctx->drop_flags &= ~SLOT_FLAG; | |
1266 | ctrl_ctx->drop_flags &= ~EP0_FLAG; | |
f94e0186 | 1267 | xhci_dbg(xhci, "New Input Control Context:\n"); |
d115b048 JY |
1268 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
1269 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, | |
1270 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); | |
f94e0186 | 1271 | |
913a8a34 SS |
1272 | ret = xhci_configure_endpoint(xhci, udev, NULL, |
1273 | false, false); | |
f94e0186 SS |
1274 | if (ret) { |
1275 | /* Caller (the USB core) should call reset_bandwidth() */ |
f94e0186 SS |
1276 | return ret; |
1277 | } | |
1278 | ||
1279 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); | |
d115b048 JY |
1280 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, |
1281 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); | |
f94e0186 | 1282 | |
d115b048 | 1283 | xhci_zero_in_ctx(xhci, virt_dev); |
74f9fe21 | 1284 | /* Install new rings and free or cache any old rings */ |
f94e0186 | 1285 | for (i = 1; i < 31; ++i) { |
74f9fe21 SS |
1286 | if (!virt_dev->eps[i].new_ring) |
1287 | continue; | |
1288 | /* Only cache or free the old ring if it exists. | |
1289 | * It may not if this is the first add of an endpoint. | |
1290 | */ | |
1291 | if (virt_dev->eps[i].ring) { | |
412566bd | 1292 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
f94e0186 | 1293 | } |
74f9fe21 SS |
1294 | virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; |
1295 | virt_dev->eps[i].new_ring = NULL; | |
f94e0186 SS |
1296 | } |
1297 | ||
f94e0186 SS |
1298 | return ret; |
1299 | } | |
1300 | ||
1301 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |
1302 | { | |
f94e0186 SS |
1303 | struct xhci_hcd *xhci; |
1304 | struct xhci_virt_device *virt_dev; | |
1305 | int i, ret; | |
1306 | ||
1307 | ret = xhci_check_args(hcd, udev, NULL, 0, __func__); | |
1308 | if (ret <= 0) | |
1309 | return; | |
1310 | xhci = hcd_to_xhci(hcd); | |
1311 | ||
f94e0186 SS |
1312 | if (!xhci->devs || !xhci->devs[udev->slot_id]) { |
1313 | xhci_warn(xhci, "xHCI %s called with unaddressed device\n", | |
1314 | __func__); | |
f94e0186 SS |
1315 | return; |
1316 | } | |
700e2052 | 1317 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
f94e0186 SS |
1318 | virt_dev = xhci->devs[udev->slot_id]; |
1319 | /* Free any rings allocated for added endpoints */ | |
1320 | for (i = 0; i < 31; ++i) { | |
63a0d9ab SS |
1321 | if (virt_dev->eps[i].new_ring) { |
1322 | xhci_ring_free(xhci, virt_dev->eps[i].new_ring); | |
1323 | virt_dev->eps[i].new_ring = NULL; | |
f94e0186 SS |
1324 | } |
1325 | } | |
d115b048 | 1326 | xhci_zero_in_ctx(xhci, virt_dev); |
f94e0186 SS |
1327 | } |
1328 | ||
5270b951 | 1329 | static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, |
913a8a34 SS |
1330 | struct xhci_container_ctx *in_ctx, |
1331 | struct xhci_container_ctx *out_ctx, | |
1332 | u32 add_flags, u32 drop_flags) | |
5270b951 SS |
1333 | { |
1334 | struct xhci_input_control_ctx *ctrl_ctx; | |
913a8a34 | 1335 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
5270b951 SS |
1336 | ctrl_ctx->add_flags = add_flags; |
1337 | ctrl_ctx->drop_flags = drop_flags; | |
913a8a34 | 1338 | xhci_slot_copy(xhci, in_ctx, out_ctx); |
5270b951 SS |
1339 | ctrl_ctx->add_flags |= SLOT_FLAG; |
1340 | ||
913a8a34 SS |
1341 | xhci_dbg(xhci, "Input Context:\n"); |
1342 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); | |
5270b951 SS |
1343 | } |
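/* Illustrative sketch (not from this file): building the add/drop bitmasks
 * that this helper installs.  Bit 0 of each mask covers the slot context,
 * bit 1 covers EP0, and bit (ep_index + 1) covers every other endpoint,
 * which is the value xhci_get_endpoint_flag() computes.  new_ep and old_ep
 * below are hypothetical endpoints:
 *
 *	u32 add_flags = xhci_get_endpoint_flag(&new_ep->desc);
 *	u32 drop_flags = xhci_get_endpoint_flag(&old_ep->desc);
 *
 *	xhci_setup_input_ctx_for_config_ep(xhci, in_ctx, out_ctx,
 *			add_flags, drop_flags);
 *
 * Callers pass only the endpoint bits; SLOT_FLAG is OR'ed into add_flags
 * by the helper itself.
 */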
1344 | ||
ac9d8fe7 SS |
1345 | void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, |
1346 | unsigned int slot_id, unsigned int ep_index, | |
1347 | struct xhci_dequeue_state *deq_state) | |
1348 | { | |
1349 | struct xhci_container_ctx *in_ctx; | |
ac9d8fe7 SS |
1350 | struct xhci_ep_ctx *ep_ctx; |
1351 | u32 added_ctxs; | |
1352 | dma_addr_t addr; | |
1353 | ||
913a8a34 SS |
1354 | xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, |
1355 | xhci->devs[slot_id]->out_ctx, ep_index); | |
ac9d8fe7 SS |
1356 | in_ctx = xhci->devs[slot_id]->in_ctx; |
1357 | ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); | |
1358 | addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, | |
1359 | deq_state->new_deq_ptr); | |
1360 | if (addr == 0) { | |
1361 | xhci_warn(xhci, "WARN Cannot submit config ep after " | |
1362 | "reset ep command\n"); | |
1363 | xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", | |
1364 | deq_state->new_deq_seg, | |
1365 | deq_state->new_deq_ptr); | |
1366 | return; | |
1367 | } | |
1368 | ep_ctx->deq = addr | deq_state->new_cycle_state; | |
1369 | ||
ac9d8fe7 | 1370 | added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); |
913a8a34 SS |
1371 | xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, |
1372 | xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs); | |
ac9d8fe7 SS |
1373 | } |
1374 | ||
82d1009f | 1375 | void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, |
63a0d9ab | 1376 | struct usb_device *udev, unsigned int ep_index) |
82d1009f SS |
1377 | { |
1378 | struct xhci_dequeue_state deq_state; | |
63a0d9ab | 1379 | struct xhci_virt_ep *ep; |
82d1009f SS |
1380 | |
1381 | xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); | |
63a0d9ab | 1382 | ep = &xhci->devs[udev->slot_id]->eps[ep_index]; |
82d1009f SS |
1383 | /* We need to move the HW's dequeue pointer past this TD, |
1384 | * or it will attempt to resend it on the next doorbell ring. | |
1385 | */ | |
1386 | xhci_find_new_dequeue_state(xhci, udev->slot_id, | |
e9df17eb | 1387 | ep_index, ep->stopped_stream, ep->stopped_td, |
ac9d8fe7 | 1388 | &deq_state); |
82d1009f | 1389 | |
ac9d8fe7 SS |
1390 | /* HW with the reset endpoint quirk will use the saved dequeue state to |
1391 | * issue a configure endpoint command later. | |
1392 | */ | |
1393 | if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { | |
1394 | xhci_dbg(xhci, "Queueing new dequeue state\n"); | |
63a0d9ab | 1395 | xhci_queue_new_dequeue_state(xhci, udev->slot_id, |
e9df17eb | 1396 | ep_index, ep->stopped_stream, &deq_state); |
ac9d8fe7 SS |
1397 | } else { |
1398 | /* Better hope no one uses the input context between now and the | |
1399 | * reset endpoint completion! | |
e9df17eb SS |
1400 | * XXX: No idea how this hardware will react when stream rings |
1401 | * are enabled. | |
ac9d8fe7 SS |
1402 | */ |
1403 | xhci_dbg(xhci, "Setting up input context for " | |
1404 | "configure endpoint command\n"); | |
1405 | xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, | |
1406 | ep_index, &deq_state); | |
1407 | } | |
82d1009f SS |
1408 | } |
1409 | ||
a1587d97 SS |
1410 | /* Deal with stalled endpoints. The core should have sent the control message |
1411 | * to clear the halt condition. However, we need to make the xHCI hardware | |
1412 | * reset its sequence number, since a device will expect a sequence number of | |
1413 | * zero after the halt condition is cleared. | |
1414 | * Context: in_interrupt | |
1415 | */ | |
1416 | void xhci_endpoint_reset(struct usb_hcd *hcd, | |
1417 | struct usb_host_endpoint *ep) | |
1418 | { | |
1419 | struct xhci_hcd *xhci; | |
1420 | struct usb_device *udev; | |
1421 | unsigned int ep_index; | |
1422 | unsigned long flags; | |
1423 | int ret; | |
63a0d9ab | 1424 | struct xhci_virt_ep *virt_ep; |
a1587d97 SS |
1425 | |
1426 | xhci = hcd_to_xhci(hcd); | |
1427 | udev = (struct usb_device *) ep->hcpriv; | |
1428 | /* Called with a root hub endpoint (or an endpoint that wasn't added | |
1429 | * with xhci_add_endpoint()). | |
1430 | */ | |
1431 | if (!ep->hcpriv) | |
1432 | return; | |
1433 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
63a0d9ab SS |
1434 | virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; |
1435 | if (!virt_ep->stopped_td) { | |
c92bcfa7 SS |
1436 | xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", |
1437 | ep->desc.bEndpointAddress); | |
1438 | return; | |
1439 | } | |
82d1009f SS |
1440 | if (usb_endpoint_xfer_control(&ep->desc)) { |
1441 | xhci_dbg(xhci, "Control endpoint stall already handled.\n"); | |
1442 | return; | |
1443 | } | |
a1587d97 SS |
1444 | |
1445 | xhci_dbg(xhci, "Queueing reset endpoint command\n"); | |
1446 | spin_lock_irqsave(&xhci->lock, flags); | |
1447 | ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); | |
c92bcfa7 SS |
1448 | /* |
1449 | * Can't change the ring dequeue pointer until it's transitioned to the | |
1450 | * stopped state, which is only upon a successful reset endpoint | |
1451 | * command. Better hope that last command worked! | |
1452 | */ | |
a1587d97 | 1453 | if (!ret) { |
63a0d9ab SS |
1454 | xhci_cleanup_stalled_ring(xhci, udev, ep_index); |
1455 | kfree(virt_ep->stopped_td); | |
a1587d97 SS |
1456 | xhci_ring_cmd_db(xhci); |
1457 | } | |
1624ae1c SS |
1458 | virt_ep->stopped_td = NULL; |
1459 | virt_ep->stopped_trb = NULL; | |
a1587d97 SS |
1460 | spin_unlock_irqrestore(&xhci->lock, flags); |
1461 | ||
1462 | if (ret) | |
1463 | xhci_warn(xhci, "FIXME allocate a new ring segment\n"); | |
1464 | } | |
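/* Illustrative sketch (not part of this driver): class drivers never call
 * xhci_endpoint_reset() directly.  They clear a halted bulk endpoint with
 * usb_clear_halt(), and the USB core then invokes the HCD's endpoint_reset
 * hook, which lands here for xHCI.  udev and ep below are hypothetical
 * driver-side variables:
 *
 *	int rc = usb_clear_halt(udev, usb_rcvbulkpipe(udev,
 *			ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK));
 *	if (rc)
 *		dev_err(&udev->dev, "halt not cleared: %d\n", rc);
 */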
1465 | ||
8df75f42 SS |
1466 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
1467 | struct usb_device *udev, struct usb_host_endpoint *ep, | |
1468 | unsigned int slot_id) | |
1469 | { | |
1470 | int ret; | |
1471 | unsigned int ep_index; | |
1472 | unsigned int ep_state; | |
1473 | ||
1474 | if (!ep) | |
1475 | return -EINVAL; | |
1476 | ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__); | |
1477 | if (ret <= 0) | |
1478 | return -EINVAL; | |
842f1690 | 1479 | if (ep->ss_ep_comp.bmAttributes == 0) { |
8df75f42 SS |
1480 | xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" |
1481 | " descriptor for ep 0x%x does not support streams\n", | |
1482 | ep->desc.bEndpointAddress); | |
1483 | return -EINVAL; | |
1484 | } | |
1485 | ||
1486 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
1487 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; | |
1488 | if (ep_state & EP_HAS_STREAMS || | |
1489 | ep_state & EP_GETTING_STREAMS) { | |
1490 | xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " | |
1491 | "already has streams set up.\n", | |
1492 | ep->desc.bEndpointAddress); | |
1493 | xhci_warn(xhci, "Send email to xHCI maintainer and ask for " | |
1494 | "dynamic stream context array reallocation.\n"); | |
1495 | return -EINVAL; | |
1496 | } | |
1497 | if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { | |
1498 | xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " | |
1499 | "endpoint 0x%x; URBs are pending.\n", | |
1500 | ep->desc.bEndpointAddress); | |
1501 | return -EINVAL; | |
1502 | } | |
1503 | return 0; | |
1504 | } | |
1505 | ||
1506 | static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, | |
1507 | unsigned int *num_streams, unsigned int *num_stream_ctxs) | |
1508 | { | |
1509 | unsigned int max_streams; | |
1510 | ||
1511 | /* The stream context array size must be a power of two */ | |
1512 | *num_stream_ctxs = roundup_pow_of_two(*num_streams); | |
1513 | /* | |
1514 | * Find out how many primary stream array entries the host controller | |
1515 | * supports. Later we may use secondary stream arrays (similar to 2nd | |
1516 | * level page entries), but that's an optional feature for xHCI host | |
1517 | * controllers. xHCs must support at least 4 stream IDs. | |
1518 | */ | |
1519 | max_streams = HCC_MAX_PSA(xhci->hcc_params); | |
1520 | if (*num_stream_ctxs > max_streams) { | |
1521 | xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", | |
1522 | max_streams); | |
1523 | *num_stream_ctxs = max_streams; | |
1524 | *num_streams = max_streams; | |
1525 | } | |
1526 | } | |
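/* Worked example (illustrative): a driver asks for 16 streams, so the
 * caller has already bumped *num_streams to 17 to cover stream 0.
 * roundup_pow_of_two(17) gives a 32-entry stream context array.  If
 * HCC_MAX_PSA() reports that the host supports only 16 primary stream
 * array entries, both values are clamped to 16, leaving the driver with
 * 15 usable stream IDs (1-15).
 */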
1527 | ||
1528 | /* Returns an error code if one of the endpoints already has streams. | |
1529 | * This does not change any data structures, it only checks and gathers | |
1530 | * information. | |
1531 | */ | |
1532 | static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, | |
1533 | struct usb_device *udev, | |
1534 | struct usb_host_endpoint **eps, unsigned int num_eps, | |
1535 | unsigned int *num_streams, u32 *changed_ep_bitmask) | |
1536 | { | |
8df75f42 SS |
1537 | unsigned int max_streams; |
1538 | unsigned int endpoint_flag; | |
1539 | int i; | |
1540 | int ret; | |
1541 | ||
1542 | for (i = 0; i < num_eps; i++) { | |
1543 | ret = xhci_check_streams_endpoint(xhci, udev, | |
1544 | eps[i], udev->slot_id); | |
1545 | if (ret < 0) | |
1546 | return ret; | |
1547 | ||
842f1690 AS |
1548 | max_streams = USB_SS_MAX_STREAMS( |
1549 | eps[i]->ss_ep_comp.bmAttributes); | |
8df75f42 SS |
1550 | if (max_streams < (*num_streams - 1)) { |
1551 | xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", | |
1552 | eps[i]->desc.bEndpointAddress, | |
1553 | max_streams); | |
1554 | *num_streams = max_streams+1; | |
1555 | } | |
1556 | ||
1557 | endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); | |
1558 | if (*changed_ep_bitmask & endpoint_flag) | |
1559 | return -EINVAL; | |
1560 | *changed_ep_bitmask |= endpoint_flag; | |
1561 | } | |
1562 | return 0; | |
1563 | } | |
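/* Worked example (illustrative, assuming USB_SS_MAX_STREAMS() decodes the
 * companion descriptor's MaxStreams field as a power of two): a driver
 * asks for 32 streams, so *num_streams arrives here as 33 with stream 0
 * included.  An endpoint advertising MaxStreams = 4 supports 2^4 = 16
 * stream IDs, and 16 < 32, so *num_streams is clamped to 16 + 1 = 17 for
 * the whole endpoint group.
 */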
1564 | ||
1565 | static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, | |
1566 | struct usb_device *udev, | |
1567 | struct usb_host_endpoint **eps, unsigned int num_eps) | |
1568 | { | |
1569 | u32 changed_ep_bitmask = 0; | |
1570 | unsigned int slot_id; | |
1571 | unsigned int ep_index; | |
1572 | unsigned int ep_state; | |
1573 | int i; | |
1574 | ||
1575 | slot_id = udev->slot_id; | |
1576 | if (!xhci->devs[slot_id]) | |
1577 | return 0; | |
1578 | ||
1579 | for (i = 0; i < num_eps; i++) { | |
1580 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
1581 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; | |
1582 | /* Are streams already being freed for the endpoint? */ | |
1583 | if (ep_state & EP_GETTING_NO_STREAMS) { | |
1584 | xhci_warn(xhci, "WARN Can't disable streams for " | |
1585 | "endpoint 0x%x\n, " | |
1586 | "streams are being disabled already.", | |
1587 | eps[i]->desc.bEndpointAddress); | |
1588 | return 0; | |
1589 | } | |
1590 | /* Are there actually any streams to free? */ | |
1591 | if (!(ep_state & EP_HAS_STREAMS) && | |
1592 | !(ep_state & EP_GETTING_STREAMS)) { | |
1593 | xhci_warn(xhci, "WARN Can't disable streams for " | |
1594 | "endpoint 0x%x\n, " | |
1595 | "streams are already disabled!", | |
1596 | eps[i]->desc.bEndpointAddress); | |
1597 | xhci_warn(xhci, "WARN xhci_free_streams() called " | |
1598 | "with non-streams endpoint\n"); | |
1599 | return 0; | |
1600 | } | |
1601 | changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); | |
1602 | } | |
1603 | return changed_ep_bitmask; | |
1604 | } | |
1605 | ||
1606 | /* | |
1607 | * The USB device drivers use this function (through the HCD interface in USB | |
1608 | * core) to prepare a set of bulk endpoints to use streams. Streams are used to | |
1609 | * coordinate mass storage command queueing across multiple endpoints (basically | |
1610 | * a stream ID == a task ID). | |
1611 | * | |
1612 | * Setting up streams involves allocating the same size stream context array | |
1613 | * for each endpoint and issuing a configure endpoint command for all endpoints. | |
1614 | * | |
1615 | * Don't allow the call to succeed if one endpoint only supports one stream | |
1616 | * (which means it doesn't support streams at all). | |
1617 | * | |
1618 | * Drivers may get fewer stream IDs than they asked for, if the host controller | |
1619 | * hardware or endpoints claim they can't support the number of requested | |
1620 | * stream IDs. | |
1621 | */ | |
1622 | int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, | |
1623 | struct usb_host_endpoint **eps, unsigned int num_eps, | |
1624 | unsigned int num_streams, gfp_t mem_flags) | |
1625 | { | |
1626 | int i, ret; | |
1627 | struct xhci_hcd *xhci; | |
1628 | struct xhci_virt_device *vdev; | |
1629 | struct xhci_command *config_cmd; | |
1630 | unsigned int ep_index; | |
1631 | unsigned int num_stream_ctxs; | |
1632 | unsigned long flags; | |
1633 | u32 changed_ep_bitmask = 0; | |
1634 | ||
1635 | if (!eps) | |
1636 | return -EINVAL; | |
1637 | ||
1638 | /* Add one to the number of streams requested to account for | |
1639 | * stream 0 that is reserved for xHCI usage. | |
1640 | */ | |
1641 | num_streams += 1; | |
1642 | xhci = hcd_to_xhci(hcd); | |
1643 | xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", | |
1644 | num_streams); | |
1645 | ||
1646 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); | |
1647 | if (!config_cmd) { | |
1648 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); | |
1649 | return -ENOMEM; | |
1650 | } | |
1651 | ||
1652 | /* Check to make sure all endpoints are not already configured for | |
1653 | * streams. While we're at it, find the maximum number of streams that | |
1654 | * all the endpoints will support and check for duplicate endpoints. | |
1655 | */ | |
1656 | spin_lock_irqsave(&xhci->lock, flags); | |
1657 | ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, | |
1658 | num_eps, &num_streams, &changed_ep_bitmask); | |
1659 | if (ret < 0) { | |
1660 | xhci_free_command(xhci, config_cmd); | |
1661 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1662 | return ret; | |
1663 | } | |
1664 | if (num_streams <= 1) { | |
1665 | xhci_warn(xhci, "WARN: endpoints can't handle " | |
1666 | "more than one stream.\n"); | |
1667 | xhci_free_command(xhci, config_cmd); | |
1668 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1669 | return -EINVAL; | |
1670 | } | |
1671 | vdev = xhci->devs[udev->slot_id]; | |
1672 | /* Mark each endpoint as being in transition, so | |
1673 | * xhci_urb_enqueue() will reject all URBs. | |
1674 | */ | |
1675 | for (i = 0; i < num_eps; i++) { | |
1676 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
1677 | vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; | |
1678 | } | |
1679 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1680 | ||
1681 | /* Setup internal data structures and allocate HW data structures for | |
1682 | * streams (but don't install the HW structures in the input context | |
1683 | * until we're sure all memory allocation succeeded). | |
1684 | */ | |
1685 | xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); | |
1686 | xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", | |
1687 | num_stream_ctxs, num_streams); | |
1688 | ||
1689 | for (i = 0; i < num_eps; i++) { | |
1690 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
1691 | vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, | |
1692 | num_stream_ctxs, | |
1693 | num_streams, mem_flags); | |
1694 | if (!vdev->eps[ep_index].stream_info) | |
1695 | goto cleanup; | |
1696 | /* Set maxPstreams in endpoint context and update deq ptr to | |
1697 | * point to stream context array. FIXME | |
1698 | */ | |
1699 | } | |
1700 | ||
1701 | /* Set up the input context for a configure endpoint command. */ | |
1702 | for (i = 0; i < num_eps; i++) { | |
1703 | struct xhci_ep_ctx *ep_ctx; | |
1704 | ||
1705 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
1706 | ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); | |
1707 | ||
1708 | xhci_endpoint_copy(xhci, config_cmd->in_ctx, | |
1709 | vdev->out_ctx, ep_index); | |
1710 | xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, | |
1711 | vdev->eps[ep_index].stream_info); | |
1712 | } | |
1713 | /* Tell the HW to drop its old copy of the endpoint context info | |
1714 | * and add the updated copy from the input context. | |
1715 | */ | |
1716 | xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, | |
1717 | vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); | |
1718 | ||
1719 | /* Issue and wait for the configure endpoint command */ | |
1720 | ret = xhci_configure_endpoint(xhci, udev, config_cmd, | |
1721 | false, false); | |
1722 | ||
1723 | /* xHC rejected the configure endpoint command for some reason, so we | |
1724 | * leave the old ring intact and free our internal streams data | |
1725 | * structure. | |
1726 | */ | |
1727 | if (ret < 0) | |
1728 | goto cleanup; | |
1729 | ||
1730 | spin_lock_irqsave(&xhci->lock, flags); | |
1731 | for (i = 0; i < num_eps; i++) { | |
1732 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
1733 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; | |
1734 | xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", | |
1735 | udev->slot_id, ep_index); | |
1736 | vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; | |
1737 | } | |
1738 | xhci_free_command(xhci, config_cmd); | |
1739 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1740 | ||
1741 | /* Subtract 1 for stream 0, which drivers can't use */ | |
1742 | return num_streams - 1; | |
1743 | ||
1744 | cleanup: | |
1745 | /* If it didn't work, free the streams! */ | |
1746 | for (i = 0; i < num_eps; i++) { | |
1747 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
1748 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); | |
8a007748 | 1749 | vdev->eps[ep_index].stream_info = NULL; |
8df75f42 SS |
1750 | /* FIXME Unset maxPstreams in endpoint context and |
1751 | * update deq ptr to point to normal stream ring. | |
1752 | */ | |
1753 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; | |
1754 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; | |
1755 | xhci_endpoint_zero(xhci, vdev, eps[i]); | |
1756 | } | |
1757 | xhci_free_command(xhci, config_cmd); | |
1758 | return -ENOMEM; | |
1759 | } | |
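/* Illustrative usage sketch (not part of this file): drivers reach this
 * function through the USB core wrapper usb_alloc_streams() rather than by
 * calling it directly.  Assuming bulk_in and bulk_out are a driver's
 * SuperSpeed bulk endpoint pair on interface intf:
 *
 *	struct usb_host_endpoint *eps[] = { bulk_in, bulk_out };
 *	int streams;
 *
 *	streams = usb_alloc_streams(intf, eps, 2, 16, GFP_NOIO);
 *	if (streams < 0)
 *		dev_dbg(&intf->dev, "no streams, using plain bulk\n");
 *
 * As noted above, the driver may be granted fewer stream IDs than the 16
 * it asked for.
 */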
1760 | ||
1761 | /* Transition the endpoint from using streams to being a "normal" endpoint | |
1762 | * without streams. | |
1763 | * | |
1764 | * Modify the endpoint context state, submit a configure endpoint command, | |
1765 | * and free all endpoint rings for streams if that completes successfully. | |
1766 | */ | |
1767 | int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, | |
1768 | struct usb_host_endpoint **eps, unsigned int num_eps, | |
1769 | gfp_t mem_flags) | |
1770 | { | |
1771 | int i, ret; | |
1772 | struct xhci_hcd *xhci; | |
1773 | struct xhci_virt_device *vdev; | |
1774 | struct xhci_command *command; | |
1775 | unsigned int ep_index; | |
1776 | unsigned long flags; | |
1777 | u32 changed_ep_bitmask; | |
1778 | ||
1779 | xhci = hcd_to_xhci(hcd); | |
1780 | vdev = xhci->devs[udev->slot_id]; | |
1781 | ||
1782 | /* Set up a configure endpoint command to remove the streams rings */ | |
1783 | spin_lock_irqsave(&xhci->lock, flags); | |
1784 | changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, | |
1785 | udev, eps, num_eps); | |
1786 | if (changed_ep_bitmask == 0) { | |
1787 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1788 | return -EINVAL; | |
1789 | } | |
1790 | ||
1791 | /* Use the xhci_command structure from the first endpoint. We may have | |
1792 | * allocated too many, but the driver may call xhci_free_streams() for | |
1793 | * each endpoint it grouped into one call to xhci_alloc_streams(). | |
1794 | */ | |
1795 | ep_index = xhci_get_endpoint_index(&eps[0]->desc); | |
1796 | command = vdev->eps[ep_index].stream_info->free_streams_command; | |
1797 | for (i = 0; i < num_eps; i++) { | |
1798 | struct xhci_ep_ctx *ep_ctx; | |
1799 | ||
1800 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
1801 | ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); | |
1802 | xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= | |
1803 | EP_GETTING_NO_STREAMS; | |
1804 | ||
1805 | xhci_endpoint_copy(xhci, command->in_ctx, | |
1806 | vdev->out_ctx, ep_index); | |
1807 | xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx, | |
1808 | &vdev->eps[ep_index]); | |
1809 | } | |
1810 | xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, | |
1811 | vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); | |
1812 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1813 | ||
1814 | /* Issue and wait for the configure endpoint command, | |
1815 | * which must succeed. | |
1816 | */ | |
1817 | ret = xhci_configure_endpoint(xhci, udev, command, | |
1818 | false, true); | |
1819 | ||
1820 | /* xHC rejected the configure endpoint command for some reason, so we | |
1821 | * leave the streams rings intact. | |
1822 | */ | |
1823 | if (ret < 0) | |
1824 | return ret; | |
1825 | ||
1826 | spin_lock_irqsave(&xhci->lock, flags); | |
1827 | for (i = 0; i < num_eps; i++) { | |
1828 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
1829 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); | |
8a007748 | 1830 | vdev->eps[ep_index].stream_info = NULL; |
8df75f42 SS |
1831 | /* FIXME Unset maxPstreams in endpoint context and |
1832 | * update deq ptr to point to normal stream ring. | |
1833 | */ | |
1834 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; | |
1835 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; | |
1836 | } | |
1837 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1838 | ||
1839 | return 0; | |
1840 | } | |
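/* Illustrative sketch: the matching teardown from the driver's side, again
 * via the USB core rather than a direct call (eps as in the allocation
 * example above):
 *
 *	usb_free_streams(intf, eps, 2, GFP_NOIO);
 *
 * The core routes this to xhci_free_streams() through the hc_driver ops.
 */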
1841 | ||
2a8f82c4 SS |
1842 | /* |
1843 | * This submits a Reset Device Command, which will set the device state to 0, | |
1844 | * set the device address to 0, and disable all the endpoints except the default | |
1845 | * control endpoint. The USB core should come back and call | |
1846 | * xhci_address_device(), and then re-set up the configuration. If this is | |
1847 | * called because of a usb_reset_and_verify_device(), then the old alternate | |
1848 | * settings will be re-installed through the normal bandwidth allocation | |
1849 | * functions. | |
1850 | * | |
1851 | * Wait for the Reset Device command to finish. Remove all structures | |
1852 | * associated with the endpoints that were disabled. Clear the input device | |
1853 | * structure? Cache the rings? Reset the control endpoint 0 max packet size? | |
1854 | */ | |
1855 | int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | |
1856 | { | |
1857 | int ret, i; | |
1858 | unsigned long flags; | |
1859 | struct xhci_hcd *xhci; | |
1860 | unsigned int slot_id; | |
1861 | struct xhci_virt_device *virt_dev; | |
1862 | struct xhci_command *reset_device_cmd; | |
1863 | int timeleft; | |
1864 | int last_freed_endpoint; | |
1865 | ||
1866 | ret = xhci_check_args(hcd, udev, NULL, 0, __func__); | |
1867 | if (ret <= 0) | |
1868 | return ret; | |
1869 | xhci = hcd_to_xhci(hcd); | |
1870 | slot_id = udev->slot_id; | |
1871 | virt_dev = xhci->devs[slot_id]; | |
1872 | if (!virt_dev) { | |
1873 | xhci_dbg(xhci, "%s called with invalid slot ID %u\n", | |
1874 | __func__, slot_id); | |
1875 | return -EINVAL; | |
1876 | } | |
1877 | ||
1878 | xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); | |
1879 | /* Allocate the command structure that holds the struct completion. | |
1880 | * Assume we're in process context, since the normal device reset | |
1881 | * process has to wait for the device anyway. Storage devices are | |
1882 | * reset as part of error handling, so use GFP_NOIO instead of | |
1883 | * GFP_KERNEL. | |
1884 | */ | |
1885 | reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); | |
1886 | if (!reset_device_cmd) { | |
1887 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); | |
1888 | return -ENOMEM; | |
1889 | } | |
1890 | ||
1891 | /* Attempt to submit the Reset Device command to the command ring */ | |
1892 | spin_lock_irqsave(&xhci->lock, flags); | |
1893 | reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; | |
1894 | list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); | |
1895 | ret = xhci_queue_reset_device(xhci, slot_id); | |
1896 | if (ret) { | |
1897 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
1898 | list_del(&reset_device_cmd->cmd_list); | |
1899 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1900 | goto command_cleanup; | |
1901 | } | |
1902 | xhci_ring_cmd_db(xhci); | |
1903 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1904 | ||
1905 | /* Wait for the Reset Device command to finish */ | |
1906 | timeleft = wait_for_completion_interruptible_timeout( | |
1907 | reset_device_cmd->completion, | |
1908 | USB_CTRL_SET_TIMEOUT); | |
1909 | if (timeleft <= 0) { | |
1910 | xhci_warn(xhci, "%s while waiting for reset device command\n", | |
1911 | timeleft == 0 ? "Timeout" : "Signal"); | |
1912 | spin_lock_irqsave(&xhci->lock, flags); | |
1913 | /* The timeout might have raced with the event ring handler, so | |
1914 | * only delete from the list if the item isn't poisoned. | |
1915 | */ | |
1916 | if (reset_device_cmd->cmd_list.next != LIST_POISON1) | |
1917 | list_del(&reset_device_cmd->cmd_list); | |
1918 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1919 | ret = -ETIME; | |
1920 | goto command_cleanup; | |
1921 | } | |
1922 | ||
1923 | /* The Reset Device command can't fail, according to the 0.95/0.96 spec, | |
1924 | * unless we tried to reset a slot ID that wasn't enabled, | |
1925 | * or the device wasn't in the addressed or configured state. | |
1926 | */ | |
1927 | ret = reset_device_cmd->status; | |
1928 | switch (ret) { | |
1929 | case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ | |
1930 | case COMP_CTX_STATE: /* 0.96 completion code for same thing */ | |
1931 | xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", | |
1932 | slot_id, | |
1933 | xhci_get_slot_state(xhci, virt_dev->out_ctx)); | |
1934 | xhci_info(xhci, "Not freeing device rings.\n"); | |
1935 | /* Don't treat this as an error. May change my mind later. */ | |
1936 | ret = 0; | |
1937 | goto command_cleanup; | |
1938 | case COMP_SUCCESS: | |
1939 | xhci_dbg(xhci, "Successful reset device command.\n"); | |
1940 | break; | |
1941 | default: | |
1942 | if (xhci_is_vendor_info_code(xhci, ret)) | |
1943 | break; | |
1944 | xhci_warn(xhci, "Unknown completion code %u for " | |
1945 | "reset device command.\n", ret); | |
1946 | ret = -EINVAL; | |
1947 | goto command_cleanup; | |
1948 | } | |
1949 | ||
1950 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ | |
1951 | last_freed_endpoint = 1; | |
1952 | for (i = 1; i < 31; ++i) { | |
1953 | if (!virt_dev->eps[i].ring) | |
1954 | continue; | |
1955 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | |
1956 | last_freed_endpoint = i; | |
1957 | } | |
1958 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); | |
1959 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); | |
1960 | ret = 0; | |
1961 | ||
1962 | command_cleanup: | |
1963 | xhci_free_command(xhci, reset_device_cmd); | |
1964 | return ret; | |
1965 | } | |
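/* Illustrative note: this function is wired up as the hc_driver
 * reset_device hook, so the usual path to it is a driver calling
 * usb_reset_device(), with the core working through
 * usb_reset_and_verify_device() and then hcd->driver->reset_device()
 * before re-addressing and re-configuring the device.
 */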
1966 | ||
3ffbba95 SS |
1967 | /* |
1968 | * At this point, the struct usb_device is about to go away, the device has | |
1969 | * disconnected, and all traffic has been stopped and the endpoints have been | |
1970 | * disabled. Free any HC data structures associated with that device. | |
1971 | */ | |
1972 | void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | |
1973 | { | |
1974 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
6f5165cf | 1975 | struct xhci_virt_device *virt_dev; |
3ffbba95 | 1976 | unsigned long flags; |
c526d0d4 | 1977 | u32 state; |
6f5165cf | 1978 | int i; |
3ffbba95 SS |
1979 | |
1980 | if (udev->slot_id == 0) | |
1981 | return; | |
6f5165cf SS |
1982 | virt_dev = xhci->devs[udev->slot_id]; |
1983 | if (!virt_dev) | |
1984 | return; | |
1985 | ||
1986 | /* Stop any wayward timer functions (which may grab the lock) */ | |
1987 | for (i = 0; i < 31; ++i) { | |
1988 | virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; | |
1989 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); | |
1990 | } | |
3ffbba95 SS |
1991 | |
1992 | spin_lock_irqsave(&xhci->lock, flags); | |
c526d0d4 SS |
1993 | /* Don't disable the slot if the host controller is dead. */ |
1994 | state = xhci_readl(xhci, &xhci->op_regs->status); | |
6f5165cf | 1995 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { |
c526d0d4 SS |
1996 | xhci_free_virt_device(xhci, udev->slot_id); |
1997 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1998 | return; | |
1999 | } | |
2000 | ||
23e3be11 | 2001 | if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { |
3ffbba95 SS |
2002 | spin_unlock_irqrestore(&xhci->lock, flags); |
2003 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
2004 | return; | |
2005 | } | |
23e3be11 | 2006 | xhci_ring_cmd_db(xhci); |
3ffbba95 SS |
2007 | spin_unlock_irqrestore(&xhci->lock, flags); |
2008 | /* | |
2009 | * Event command completion handler will free any data structures | |
f88ba78d | 2010 | * associated with the slot. XXX Can free sleep? |
3ffbba95 SS |
2011 | */ |
2012 | } | |
2013 | ||
2014 | /* | |
2015 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command | |
2016 | * timed out, or allocating memory failed. Returns 1 on success. | |
2017 | */ | |
2018 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) | |
2019 | { | |
2020 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
2021 | unsigned long flags; | |
2022 | int timeleft; | |
2023 | int ret; | |
2024 | ||
2025 | spin_lock_irqsave(&xhci->lock, flags); | |
23e3be11 | 2026 | ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); |
3ffbba95 SS |
2027 | if (ret) { |
2028 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2029 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
2030 | return 0; | |
2031 | } | |
23e3be11 | 2032 | xhci_ring_cmd_db(xhci); |
3ffbba95 SS |
2033 | spin_unlock_irqrestore(&xhci->lock, flags); |
2034 | ||
2035 | /* XXX: how much time for xHC slot assignment? */ | |
2036 | timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, | |
2037 | USB_CTRL_SET_TIMEOUT); | |
2038 | if (timeleft <= 0) { | |
2039 | xhci_warn(xhci, "%s while waiting for a slot\n", | |
2040 | timeleft == 0 ? "Timeout" : "Signal"); | |
2041 | /* FIXME cancel the enable slot request */ | |
2042 | return 0; | |
2043 | } | |
2044 | ||
3ffbba95 SS |
2045 | if (!xhci->slot_id) { |
2046 | xhci_err(xhci, "Error while assigning device slot ID\n"); | |
3ffbba95 SS |
2047 | return 0; |
2048 | } | |
f88ba78d | 2049 | /* xhci_alloc_virt_device() does not touch rings; no need to lock */ |
3ffbba95 SS |
2050 | if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) { |
2051 | /* Disable slot, if we can do it without mem alloc */ | |
2052 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); | |
f88ba78d | 2053 | spin_lock_irqsave(&xhci->lock, flags); |
23e3be11 SS |
2054 | if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) |
2055 | xhci_ring_cmd_db(xhci); | |
3ffbba95 SS |
2056 | spin_unlock_irqrestore(&xhci->lock, flags); |
2057 | return 0; | |
2058 | } | |
2059 | udev->slot_id = xhci->slot_id; | |
2060 | /* Is this a LS or FS device under a HS hub? */ | |
2061 | /* Hub or peripheral? */ | |
3ffbba95 SS |
2062 | return 1; |
2063 | } | |
2064 | ||
2065 | /* | |
2066 | * Issue an Address Device command (which will issue a SetAddress request to | |
2067 | * the device). | |
2068 | * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so | |
2069 | * we should only issue and wait on one address command at the same time. | |
2070 | * | |
2071 | * We add one to the device address issued by the hardware because the USB core | |
2072 | * uses address 1 for the root hubs (even though they're not really devices). | |
2073 | */ | |
2074 | int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |
2075 | { | |
2076 | unsigned long flags; | |
2077 | int timeleft; | |
2078 | struct xhci_virt_device *virt_dev; | |
2079 | int ret = 0; | |
2080 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
d115b048 JY |
2081 | struct xhci_slot_ctx *slot_ctx; |
2082 | struct xhci_input_control_ctx *ctrl_ctx; | |
8e595a5d | 2083 | u64 temp_64; |
3ffbba95 SS |
2084 | |
2085 | if (!udev->slot_id) { | |
2086 | xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); | |
2087 | return -EINVAL; | |
2088 | } | |
2089 | ||
3ffbba95 SS |
2090 | virt_dev = xhci->devs[udev->slot_id]; |
2091 | ||
2092 | /* If this is a Set Address to an unconfigured device, setup ep 0 */ | |
2093 | if (!udev->config) | |
2094 | xhci_setup_addressable_virt_dev(xhci, udev); | |
2095 | /* Otherwise, assume the core has the device configured how it wants */ | |
66e49d87 | 2096 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
d115b048 | 2097 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
3ffbba95 | 2098 | |
f88ba78d | 2099 | spin_lock_irqsave(&xhci->lock, flags); |
d115b048 JY |
2100 | ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, |
2101 | udev->slot_id); | |
3ffbba95 SS |
2102 | if (ret) { |
2103 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2104 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
2105 | return ret; | |
2106 | } | |
23e3be11 | 2107 | xhci_ring_cmd_db(xhci); |
3ffbba95 SS |
2108 | spin_unlock_irqrestore(&xhci->lock, flags); |
2109 | ||
2110 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ | |
2111 | timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, | |
2112 | USB_CTRL_SET_TIMEOUT); | |
2113 | /* FIXME: From section 4.3.4: "Software shall be responsible for timing | |
2114 | * the SetAddress() "recovery interval" required by USB and aborting the | |
2115 | * command on a timeout." | |
2116 | */ | |
2117 | if (timeleft <= 0) { | |
2118 | xhci_warn(xhci, "%s while waiting for address device command\n", | |
2119 | timeleft == 0 ? "Timeout" : "Signal"); | |
2120 | /* FIXME cancel the address device command */ | |
2121 | return -ETIME; | |
2122 | } | |
2123 | ||
3ffbba95 SS |
2124 | switch (virt_dev->cmd_status) { |
2125 | case COMP_CTX_STATE: | |
2126 | case COMP_EBADSLT: | |
2127 | xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", | |
2128 | udev->slot_id); | |
2129 | ret = -EINVAL; | |
2130 | break; | |
2131 | case COMP_TX_ERR: | |
2132 | dev_warn(&udev->dev, "Device not responding to set address.\n"); | |
2133 | ret = -EPROTO; | |
2134 | break; | |
2135 | case COMP_SUCCESS: | |
2136 | xhci_dbg(xhci, "Successful Address Device command\n"); | |
2137 | break; | |
2138 | default: | |
2139 | xhci_err(xhci, "ERROR: unexpected command completion " | |
2140 | "code 0x%x.\n", virt_dev->cmd_status); | |
66e49d87 | 2141 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
d115b048 | 2142 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
3ffbba95 SS |
2143 | ret = -EINVAL; |
2144 | break; | |
2145 | } | |
2146 | if (ret) { | |
3ffbba95 SS |
2147 | return ret; |
2148 | } | |
8e595a5d SS |
2149 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
2150 | xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); | |
2151 | xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", | |
3ffbba95 | 2152 | udev->slot_id, |
8e595a5d SS |
2153 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], |
2154 | (unsigned long long) | |
2155 | xhci->dcbaa->dev_context_ptrs[udev->slot_id]); | |
700e2052 | 2156 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", |
d115b048 | 2157 | (unsigned long long)virt_dev->out_ctx->dma); |
3ffbba95 | 2158 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
d115b048 | 2159 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
3ffbba95 | 2160 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
d115b048 | 2161 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
3ffbba95 SS |
2162 | /* |
2163 | * USB core uses address 1 for the roothubs, so we add one to the | |
2164 | * address given back to us by the HC. | |
2165 | */ | |
d115b048 JY |
2166 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
2167 | udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; | |
f94e0186 | 2168 | /* Zero the input context control for later use */ |
d115b048 JY |
2169 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
2170 | ctrl_ctx->add_flags = 0; | |
2171 | ctrl_ctx->drop_flags = 0; | |
3ffbba95 SS |
2172 | |
2173 | xhci_dbg(xhci, "Device address = %d\n", udev->devnum); | |
2174 | /* XXX Meh, not sure if anyone else but choose_address uses this. */ | |
2175 | set_bit(udev->devnum, udev->bus->devmap.devicemap); | |
2176 | ||
2177 | return 0; | |
2178 | } | |
2179 | ||
ac1c1b7f SS |
2180 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's |
2181 | * internal data structures for the device. | |
2182 | */ | |
2183 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, | |
2184 | struct usb_tt *tt, gfp_t mem_flags) | |
2185 | { | |
2186 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
2187 | struct xhci_virt_device *vdev; | |
2188 | struct xhci_command *config_cmd; | |
2189 | struct xhci_input_control_ctx *ctrl_ctx; | |
2190 | struct xhci_slot_ctx *slot_ctx; | |
2191 | unsigned long flags; | |
2192 | unsigned think_time; | |
2193 | int ret; | |
2194 | ||
2195 | /* Ignore root hubs */ | |
2196 | if (!hdev->parent) | |
2197 | return 0; | |
2198 | ||
2199 | vdev = xhci->devs[hdev->slot_id]; | |
2200 | if (!vdev) { | |
2201 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); | |
2202 | return -EINVAL; | |
2203 | } | |
a1d78c16 | 2204 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); |
ac1c1b7f SS |
2205 | if (!config_cmd) { |
2206 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); | |
2207 | return -ENOMEM; | |
2208 | } | |
2209 | ||
2210 | spin_lock_irqsave(&xhci->lock, flags); | |
2211 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); | |
2212 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); | |
2213 | ctrl_ctx->add_flags |= SLOT_FLAG; | |
2214 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); | |
2215 | slot_ctx->dev_info |= DEV_HUB; | |
2216 | if (tt->multi) | |
2217 | slot_ctx->dev_info |= DEV_MTT; | |
2218 | if (xhci->hci_version > 0x95) { | |
2219 | xhci_dbg(xhci, "xHCI version %x needs hub " | |
2220 | "TT think time and number of ports\n", | |
2221 | (unsigned int) xhci->hci_version); | |
2222 | slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild); | |
2223 | /* Set TT think time - convert from ns to FS bit times. | |
2224 | * 0 = 8 FS bit times, 1 = 16 FS bit times, | |
2225 | * 2 = 24 FS bit times, 3 = 32 FS bit times. | |
2226 | */ | |
2227 | think_time = tt->think_time; | |
2228 | if (think_time != 0) | |
2229 | think_time = (think_time / 666) - 1; | |
2230 | slot_ctx->tt_info |= TT_THINK_TIME(think_time); | |
2231 | } else { | |
2232 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " | |
2233 | "TT think time or number of ports\n", | |
2234 | (unsigned int) xhci->hci_version); | |
2235 | } | |
2236 | slot_ctx->dev_state = 0; | |
2237 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2238 | ||
2239 | xhci_dbg(xhci, "Set up %s for hub device.\n", | |
2240 | (xhci->hci_version > 0x95) ? | |
2241 | "configure endpoint" : "evaluate context"); | |
2242 | xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); | |
2243 | xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); | |
2244 | ||
2245 | /* Issue and wait for the configure endpoint or | |
2246 | * evaluate context command. | |
2247 | */ | |
2248 | if (xhci->hci_version > 0x95) | |
2249 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | |
2250 | false, false); | |
2251 | else | |
2252 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | |
2253 | true, false); | |
2254 | ||
2255 | xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); | |
2256 | xhci_dbg_ctx(xhci, vdev->out_ctx, 0); | |
2257 | ||
2258 | xhci_free_command(xhci, config_cmd); | |
2259 | return ret; | |
2260 | } | |
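/* Worked example (illustrative) of the think-time conversion above: one FS
 * bit time is roughly 83 ns, so each encoding step covers 8 bit times, or
 * about 666 ns.  A TT reporting think_time = 1332 ns yields
 * (1332 / 666) - 1 = 1, i.e. TT_THINK_TIME(1), which encodes 16 FS bit
 * times.
 */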
2261 | ||
66d4eadd SS |
2262 | int xhci_get_frame(struct usb_hcd *hcd) |
2263 | { | |
2264 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
2265 | /* EHCI mods by the periodic size. Why? */ | |
2266 | return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; | |
2267 | } | |
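/* Illustrative note: MFINDEX counts 125 us microframes, and there are
 * eight microframes per 1 ms frame, so the >> 3 above converts a
 * microframe index such as 0x1234 (4660) into frame number 582.
 */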
2268 | ||
2269 | MODULE_DESCRIPTION(DRIVER_DESC); | |
2270 | MODULE_AUTHOR(DRIVER_AUTHOR); | |
2271 | MODULE_LICENSE("GPL"); | |
2272 | ||
2273 | static int __init xhci_hcd_init(void) | |
2274 | { | |
2275 | #ifdef CONFIG_PCI | |
2276 | int retval = 0; | |
2277 | ||
2278 | retval = xhci_register_pci(); | |
2279 | ||
2280 | if (retval < 0) { | |
2281 | printk(KERN_DEBUG "Problem registering PCI driver.\n"); | |
2282 | return retval; | |
2283 | } | |
2284 | #endif | |
98441973 SS |
2285 | /* |
2286 | * Check the compiler generated sizes of structures that must be laid | |
2287 | * out in specific ways for hardware access. | |
2288 | */ | |
2289 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); | |
2290 | BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); | |
2291 | BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); | |
2292 | /* xhci_device_control has eight fields, and also | |
2293 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx | |
2294 | */ | |
98441973 SS |
2295 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); |
2296 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); | |
2297 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); | |
2298 | BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); | |
2299 | BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); | |
2300 | /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ | |
2301 | BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); | |
66d4eadd SS |
2303 | return 0; |
2304 | } | |
2305 | module_init(xhci_hcd_init); | |
2306 | ||
2307 | static void __exit xhci_hcd_cleanup(void) | |
2308 | { | |
2309 | #ifdef CONFIG_PCI | |
2310 | xhci_unregister_pci(); | |
2311 | #endif | |
2312 | } | |
2313 | module_exit(xhci_hcd_cleanup); |