/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	return ret;
}
/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}
/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}
/*
 * Free all requested IRQs.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			if (xhci->msix_entries[i].vector)
				free_irq(xhci->msix_entries[i].vector,
						xhci_to_hcd(xhci));
	} else if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}
/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}
/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * calculate number of msi-x vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: maximum msi-x vectors per CPU core.
	 *   Add an additional vector to ensure an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
				GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}
/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif
static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	u32 ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");
	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	/* Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to msi*/
		ret = xhci_setup_msi(xhci);

	if (ret) {
legacy_irq:
		/* fall back to legacy interrupt*/
		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
					hcd->irq_descr, hcd);
		if (ret) {
			xhci_err(xhci, "request interrupt %d failed\n",
					pdev->irq);
			return ret;
		}
		hcd->irq = pdev->irq;
	}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}
static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0, SEGMENT_SIZE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int			rc = 0;
	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
	u32			command;
	int			i;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}

	return rc;
}
/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32			command, temp = 0;
	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
	struct usb_hcd		*secondary_hcd;
	int			retval = 0;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	spin_lock_irq(&xhci->lock);

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state*/
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		if (!retval) {
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			set_bit(HCD_FLAG_HW_ACCESSIBLE,
					&xhci->shared_hcd->flags);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		return retval;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_unlock_irq(&xhci->lock);
	return 0;
}
#endif	/* CONFIG_PM */
/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index  = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
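/*
 * Worked example of the index formula above (added for illustration, not part
 * of the original source):
 *	ep 0 (control, epnum 0)      -> index = 0 * 2         = 0
 *	ep 1 OUT (epnum 1, dir = 0)  -> index = 1 * 2 + 0 - 1 = 1
 *	ep 1 IN  (epnum 1, dir = 1)  -> index = 1 * 2 + 1 - 1 = 2
 *	ep 2 OUT -> 3, ep 2 IN -> 4, ... , ep 15 IN -> 30
 * i.e. indices 0..30 cover EP0 plus the 15 OUT/IN endpoint pairs.
 */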
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
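/*
 * Illustration (not part of the original source): the input control context
 * bitmask shifts the endpoint index up by one because bit 0 is reserved for
 * the slot context:
 *	slot context       -> bit 0
 *	ep 0  (index 0)    -> bit 1  (EP0_FLAG)
 *	ep 1 OUT (index 1) -> bit 2
 *	ep 1 IN  (index 2) -> bit 3
 */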
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd	*xhci;
	struct xhci_virt_device	*virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	if (check_virt_dev) {
		xhci = hcd_to_xhci(hcd);
		if (!udev->slot_id || !xhci->devs
			|| !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
						"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and "
					  "virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				  size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
		if (!urb_priv->td[i]) {
			urb_priv->length = i;
			xhci_urb_free_priv(xhci, urb_priv);
			return -ENOMEM;
		}
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0)
				return ret;
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return -ESHUTDOWN;
}
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);

	urb_priv = urb->hcpriv;

	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
				udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
xhci_count_num_new_endpoints(struct xhci_hcd
*xhci
,
1593 struct xhci_container_ctx
*in_ctx
)
1595 struct xhci_input_control_ctx
*ctrl_ctx
;
1596 u32 valid_add_flags
;
1597 u32 valid_drop_flags
;
1599 ctrl_ctx
= xhci_get_input_control_ctx(xhci
, in_ctx
);
1600 /* Ignore the slot flag (bit 0), and the default control endpoint flag
1601 * (bit 1). The default control endpoint is added during the Address
1602 * Device command and is never removed until the slot is disabled.
1604 valid_add_flags
= ctrl_ctx
->add_flags
>> 2;
1605 valid_drop_flags
= ctrl_ctx
->drop_flags
>> 2;
1607 /* Use hweight32 to count the number of ones in the add flags, or
1608 * number of endpoints added. Don't count endpoints that are changed
1609 * (both added and dropped).
1611 return hweight32(valid_add_flags
) -
1612 hweight32(valid_add_flags
& valid_drop_flags
);
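/*
 * Example of the counting above (added for illustration, not part of the
 * original source).  Suppose ep 1 IN is changed (dropped and re-added) and
 * ep 2 OUT is newly added:
 *	add_flags  = 0b11000 -> valid_add_flags  = 0b110 (after >> 2)
 *	drop_flags = 0b01000 -> valid_drop_flags = 0b010
 * hweight32(0b110) - hweight32(0b110 & 0b010) = 2 - 1 = 1 new endpoint.
 */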
static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	valid_add_flags = ctrl_ctx->add_flags >> 2;
	valid_drop_flags = ctrl_ctx->drop_flags >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.\n",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
			xhci->num_active_eps);
	return 0;
}
/*
 * The configure endpoint was failed by the xHC for some other reason, so we
 * need to revert the resources that failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
			num_failed_eps,
			xhci->num_active_eps);
}
/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
				num_dropped_eps,
				xhci->num_active_eps);
}
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
	u32 *cmd_status;
	struct xhci_virt_device *virt_dev;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];
	if (command) {
		in_ctx = command->in_ctx;
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
				xhci_reserve_host_resources(xhci, in_ctx)) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			return -ENOMEM;
		}

		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci->cmd_ring->enqueue;

		/* Enqueue pointer can be left pointing to the link TRB,
		 * we must handle that
		 */
		if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
			command->command_trb =
				xhci->cmd_ring->enq_seg->next->trbs;

		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		in_ctx = virt_dev->in_ctx;
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
				xhci_reserve_host_resources(xhci, in_ctx)) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			return -ENOMEM;
		}
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	if (!ctx_change)
		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
	else
		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, in_ctx);
		else
			xhci_finish_resource_reservation(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Callee should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		u32 add_flags, u32 drop_flags)
{
	struct xhci_input_control_ctx *ctrl_ctx;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
/* Deal with stalled endpoints. The core should have sent the control message
 * to clear the halt condition. However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;
	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint())
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}
	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command. Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports. Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers. xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}
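/*
 * Worked example (illustrative only): a driver that asks for 16 stream IDs
 * ends up requesting 17 IDs once stream 0 is added by xhci_alloc_streams(),
 * so roundup_pow_of_two(17) = 32 stream context entries are allocated,
 * assuming HCC_MAX_PSA() reports at least that many.
 */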
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = USB_SS_MAX_STREAMS(
				eps[i]->ss_ep_comp.bmAttributes);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already.",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}

		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}
/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
 * coordinate mass storage command queueing across multiple endpoints (basically
 * a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
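/*
 * Illustrative sketch (not part of this file) of how a class driver reaches
 * this code through the USB core; check the usb_alloc_streams() and
 * usb_free_streams() prototypes in include/linux/usb.h before relying on the
 * exact signatures shown here:
 *
 *	num = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *	if (num > 0)
 *		;	// stream IDs 1..num may now be used in bulk URBs
 *	...
 *	usb_free_streams(intf, eps, num_eps, GFP_NOIO);
 */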
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	/* Check to make sure all endpoints are not already configured for
	 * streams. While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array. FIXME
		 */
	}
	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);
	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;
	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
				udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;
cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}
/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;
	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	/* Use the xhci_command structure from the first endpoint. We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);
	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;
	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}
/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command. The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
	struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;
	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.\n",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint. The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration. If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish. Remove all structures
 * associated with the endpoints that were disabled. Clear the input device
 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
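/*
 * Entry-path sketch (an assumption, based on how the hc_driver hooks are
 * usually wired up rather than anything stated here): this function is
 * typically installed as the ->reset_device hook of the hc_driver, so the USB
 * core reaches it while handling usb_reset_and_verify_device(), before it
 * re-addresses and re-configures the device.
 */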
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}
	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}
	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
			SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway. Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}
	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;

	/* Enqueue pointer can be left pointing to the link TRB,
	 * we must handle that
	 */
	if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
		reset_device_cmd->command_trb =
			xhci->cmd_ring->enq_seg->next->trbs;
	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}
	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_info(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error. May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}
	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			last_freed_endpoint = i;
		}
	}
	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled. Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}
	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot. XXX Can free sleep?
	 */
}
/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.\n",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
			xhci->num_active_eps);
	return 0;
}
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed. Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */

	return 1;

disable_slot:
	/* Disable slot, if we can do it without mem alloc */
	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
		xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;
	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture test with an NEC controller,
		 * a zero-dereference was observed once due to virt_dev = 0.
		 * Print useful debug rather than crash if it is observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
				udev->slot_id);
		return -EINVAL;
	}
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
			udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}
	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	/* Use kernel assigned address for devices; store xHC assigned
	 * address locally. */
	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
			+ 1;
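	/* For example (illustrative): if the xHC handed out hardware address 1,
	 * virt_dev->address is stored as 2, keeping it consistent with the USB
	 * core's numbering, where address 1 is reserved for the root hub.
	 */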
	/* Zero the input context control for later use */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);

	return 0;
}
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;
	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	spin_lock_irqsave(&xhci->lock, flags);
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * high-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
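		/* Worked example (illustrative): a hub reporting a TT think
		 * time of 1332 ns gives (1332 / 666) - 1 = 1, i.e. the
		 * "16 FS bit times" encoding from the table above.
		 */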
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size. Why? */
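	/* The MFINDEX register counts 125 microsecond microframes; shifting
	 * right by three divides by eight to yield the 1 ms frame number.
	 */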
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.");
		return retval;
	}

	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	return 0;
}
module_init(xhci_hcd_init);
static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
}
module_exit(xhci_hcd_cleanup);