USB: xhci: Handle short control packets correctly.
drivers/usb/host/xhci-hcd.c
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/irq.h>
#include <linux/module.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)	/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 microframes of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	xhci_dbg(xhci, "// Halt the HC\n");
	/* Disable all interrupts from the host controller */
	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);

	return handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}

/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	BUG_ON((state & STS_HALT) == 0);

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	/* XXX: Why does EHCI set this here? Shouldn't other code do this? */
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;

	return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
}

/*
 * Stop the HC from processing the endpoint queues.
 */
static void xhci_quiesce(struct xhci_hcd *xhci)
{
	/*
	 * Queues are per endpoint, so we need to disable an endpoint or slot.
	 *
	 * To disable a slot, we need to insert a disable slot command on the
	 * command ring and ring the doorbell. This will also free any internal
	 * resources associated with the slot (which might not be what we want).
	 *
	 * A Release Endpoint command sounds better - doesn't free internal HC
	 * memory, but removes the endpoints from the schedule and releases the
	 * bandwidth, disables the doorbells, and clears the endpoint enable
	 * flag. Usually used prior to a set interface command.
	 *
	 * TODO: Implement after command ring code is done.
	 */
	BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
	xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
}
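
/*
 * Illustrative sketch (not compiled in) of one way the quiescing described
 * above might eventually look: queue a Stop Endpoint command for every
 * endpoint ring we have allocated, then ring the command ring doorbell.
 * This is an assumption about the future implementation, not the
 * implementation itself.
 */
#if 0
static void xhci_quiesce_sketch(struct xhci_hcd *xhci)
{
	int i, j;

	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j)
			if (xhci->devs[i]->ep_rings[j])
				xhci_queue_stop_endpoint(xhci, i, j);
	}
	xhci_ring_cmd_db(xhci);
}
#endif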

#if 0
/* Set up MSI-X table for entry 0 (may claim other entries later) */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	xhci->msix_count = 0;
	/* XXX: did I do this right? ixgbe does kcalloc for more than one */
	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}
	xhci->msix_entries[0].entry = 0;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	/*
	 * Pass the xhci pointer value as the request_irq "cookie".
	 * If more irqs are added, this will need to be unique for each one.
	 */
	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
			"xHCI", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
		goto disable_msix;
	}
	xhci_dbg(xhci, "Finished setting up MSI-X\n");
	return 0;

disable_msix:
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	if (!xhci->msix_entries)
		return;

	/* Free with the same cookie that was passed to request_irq() */
	free_irq(xhci->msix_entries[0].vector, xhci_to_hcd(xhci));
	pci_disable_msix(pdev);
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
}
#endif

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*
 * Called in interrupt context when there might be work
 * queued on the event ring
 *
 * xhci->lock must be held by caller.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
	u32 temp;
	u64 temp_64;

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp |= STS_EINT;
	xhci_writel(xhci, temp, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	/* Acknowledge the interrupt */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
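	/* Bit 0 is the interrupt pending (IP) bit, which is cleared by
	 * writing a 1 back to it; bit 1 is the interrupt enable bit.
	 * Setting both acks the interrupt while leaving the interrupter
	 * enabled (our reading of the xHCI interrupter register layout).
	 */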
	temp |= 0x3;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
	/* Flush posted writes */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);

	/* FIXME this should be a delayed service routine that clears the EHB */
	xhci_handle_event(xhci);

	/* Clear the event handler busy flag; the event ring should be empty. */
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci_write_64(xhci, temp_64 & ~ERST_EHB, &xhci->ir_set->erst_dequeue);
	/* Flush posted writes -- FIXME is this necessary? */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);
}

/*-------------------------------------------------------------------------*/

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp, temp2;

	spin_lock(&xhci->lock);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}

	if (temp & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	xhci_work(xhci);
	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (xhci->devs[i]) {
			for (j = 0; j < 31; ++j) {
				if (xhci->devs[i]->ep_rings[j]) {
					xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
					xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
				}
			}
		}
	}

	if (xhci->noops_submitted != NUM_TEST_NOOPS)
		if (xhci_setup_one_noop(xhci))
			xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	void (*doorbell)(struct xhci_hcd *) = NULL;

	hcd->uses_new_polling = 1;
	hcd->poll_rh = 0;

	xhci_dbg(xhci, "xhci_run\n");
#if 0	/* FIXME: MSI not setup yet */
	/* Do this at the very last minute */
	ret = xhci_setup_msix(xhci);
	if (!ret)
		return ret;

	return -ENOSYS;
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
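	/*
	 * The moderation interval field is in 250 nanosecond increments per
	 * the xHCI spec, so 160 below should work out to roughly 40
	 * microseconds between interrupts (our reading of the spec).
	 */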
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	hcd->state = HC_STATE_RUNNING;
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	if (NUM_TEST_NOOPS > 0)
		doorbell = xhci_setup_one_noop(xhci);

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);
	/* Flush PCI posted writes */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
	if (doorbell)
		(*doorbell)(xhci);

	xhci_dbg(xhci, "Finished xhci_run\n");
	return 0;
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	if (HC_IS_RUNNING(hcd->state))
		xhci_quiesce(xhci);
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0	/* No MSI yet */
	xhci_cleanup_msix(xhci);
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0
	xhci_cleanup_msix(xhci);
#endif

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to left shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
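
/*
 * A worked example of the index math above, written as a hypothetical
 * self-test (not compiled in): ep 1 IN (0x81) maps to (1 * 2) + 1 - 1 = 2,
 * and ep 2 OUT (0x02) maps to (2 * 2) + 0 - 1 = 3.
 */
#if 0
static void xhci_endpoint_index_example(void)
{
	struct usb_endpoint_descriptor bulk_in = {
		.bEndpointAddress = USB_DIR_IN | 1,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,
	};
	struct usb_endpoint_descriptor bulk_out = {
		.bEndpointAddress = USB_DIR_OUT | 2,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,
	};

	BUG_ON(xhci_get_endpoint_index(&bulk_in) != 2);
	BUG_ON(xhci_get_endpoint_index(&bulk_out) != 3);
}
#endif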

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, const char *func) {
	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}
	if (!udev->slot_id) {
		printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
				func);
		return -EINVAL;
	}
	return 1;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci->devs || !xhci->devs[slot_id]) {
		if (!in_interrupt())
			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
		ret = -EINVAL;
		goto exit;
	}
	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}
	if (usb_endpoint_xfer_control(&urb->ep->desc))
		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
	else
		ret = -EINVAL;
exit:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring. Since the ring is a contiguous structure, they can't be physically
 * removed. Instead, there are three cases to handle:
 *
 * 1) If the HC is in the middle of processing the URB to be canceled, we
 *    simply move the ring's dequeue pointer past those TRBs using the Set
 *    Transfer Ring Dequeue Pointer command. This will be the common case,
 *    when drivers timeout on the last submitted URB and attempt to cancel.
 *
 * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *    series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *    HC will need to invalidate any TRBs it has cached after the stop
 *    endpoint command, as noted in the xHCI 0.95 errata.
 *
 * 3) The TD may have completed by the time the Stop Endpoint Command
 *    completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret;
	struct xhci_hcd *xhci;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
	td = (struct xhci_td *) urb->hcpriv;

	ep_ring->cancels_pending++;
	list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (ep_ring->cancels_pending == 1) {
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_device_control *in_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);

	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
			in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	in_ctx->drop_flags |= drop_flag;
	new_drop_flags = in_ctx->drop_flags;

	/* Clear (not overwrite) this endpoint's add flag */
	in_ctx->add_flags &= ~drop_flag;
	new_add_flags = in_ctx->add_flags;

	last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = in_ctx->slot.dev_info;

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_device_control *in_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
				udev, ep, GFP_KERNEL) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	in_ctx->add_flags |= added_ctxs;
	new_add_flags = in_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors. We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = in_ctx->drop_flags;

	/* Update the last valid endpoint context, if we just added one past */
	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = in_ctx->slot.dev_info;

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
{
	struct xhci_ep_ctx *ep_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched. Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	virt_dev->in_ctx->drop_flags = 0;
	virt_dev->in_ctx->add_flags = 0;
	virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
	/* Endpoint 0 is always valid */
	virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
	for (i = 1; i < 31; ++i) {
		ep_ctx = &virt_dev->in_ctx->ep[i];
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint(). If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface. Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	int timeleft;
	unsigned long flags;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	virt_dev->in_ctx->add_flags |= SLOT_FLAG;
	virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
	virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
	virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
	xhci_dbg(xhci, "New Input Control Context:\n");
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
			udev->slot_id);
	if (ret < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			&virt_dev->cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		ret = -EINVAL;
		break;
	}
	if (ret) {
		/* Caller should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));

	xhci_zero_in_ctx(virt_dev);
	/* Free any old rings */
	for (i = 1; i < 31; ++i) {
		if (virt_dev->new_ep_rings[i]) {
			xhci_ring_free(xhci, virt_dev->ep_rings[i]);
			virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
			virt_dev->new_ep_rings[i] = NULL;
		}
	}

	return ret;
}
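
/*
 * Illustrative sketch (not compiled in) of how the USB core is expected to
 * drive the bandwidth interfaces above when switching alternate settings:
 * drop the old endpoints, add the new ones, then issue a single Configure
 * Endpoint command via check_bandwidth(). The function name and iteration
 * details here are assumptions for illustration only.
 */
#if 0
static int xhci_example_switch_altsetting(struct usb_hcd *hcd,
		struct usb_device *udev,
		struct usb_host_interface *old_alt,
		struct usb_host_interface *new_alt)
{
	int i, ret;

	for (i = 0; i < old_alt->desc.bNumEndpoints; ++i) {
		ret = xhci_drop_endpoint(hcd, udev, &old_alt->endpoint[i]);
		if (ret < 0)
			return ret;
	}
	for (i = 0; i < new_alt->desc.bNumEndpoints; ++i) {
		ret = xhci_add_endpoint(hcd, udev, &new_alt->endpoint[i]);
		if (ret < 0)
			goto reset;
	}
	return xhci_check_bandwidth(hcd, udev);
reset:
	xhci_reset_bandwidth(hcd, udev);
	return ret;
}
#endif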

void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->new_ep_rings[i]) {
			xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
			virt_dev->new_ep_rings[i] = NULL;
		}
	}
	xhci_zero_in_ctx(virt_dev);
}

/* Deal with stalled endpoints. The core should have sent the control message
 * to clear the halt condition. However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint())
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	if (!ret)
		xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled. Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;

	if (udev->slot_id == 0)
		return;

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot. XXX Can free sleep?
	 */
}

/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed. Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}
	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
		/* Disable slot, if we can do it without mem alloc */
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		spin_lock_irqsave(&xhci->lock, flags);
		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
			xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;
}

/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u64 temp_64;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	/* If this is a Set Address to an unconfigured device, setup ep 0 */
	if (!udev->config)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, assume the core has the device configured how it wants */

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
			udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout."
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for address device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
			xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx_dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
	/* Zero the input context control for later use */
	virt_dev->in_ctx->add_flags = 0;
	virt_dev->in_ctx->drop_flags = 0;
	/* Mirror flags in the output context for future ep enable/disable */
	virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
	virt_dev->out_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
	/* XXX Meh, not sure if anyone else but choose_address uses this. */
	set_bit(udev->devnum, udev->bus->devmap.devicemap);

	return 0;
}

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size. Why? */
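	/* The register counts 125 microsecond microframes; eight of those
	 * make up one 1 ms frame, hence the shift by three.
	 */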
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
#ifdef CONFIG_PCI
	int retval = 0;

	retval = xhci_register_pci();

	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}
#endif
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
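	/* (Each expression is <number of 32-bit fields> * 32 bits / 8 bits per byte.) */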
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	return 0;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
#ifdef CONFIG_PCI
	xhci_unregister_pci();
#endif
}
module_exit(xhci_hcd_cleanup);