/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes:  "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
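/*
 * Illustrative sketch (not part of the driver): callers typically pair a
 * register write with xhci_handshake() to wait for the hardware to catch
 * up.  The helper below is hypothetical, but the register, mask, and
 * timeout mirror real uses elsewhere in this file.
 */
static inline int __maybe_unused example_wait_until_halted(struct xhci_hcd *xhci)
{
	/* Spin until STS_HALT reads back as set, or the timeout expires. */
	return xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}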
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}
/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}
/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = readl(&xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	ret = xhci_handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}
#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}
/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_disable_msi(pdev);
	}

	return ret;
}
/*
 * Free all requested IRQs
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}
/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with the max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per CPU core.
	 *   Add one additional vector to ensure an interrupt is always
	 *   available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}
static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}
#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif
static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = readl(xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}
/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 * re-driver that sometimes causes ports behind that hardware to enter
 * compliance mode.  The quirk creates a timer that polls the link state of
 * each host controller port every 2 seconds and recovers the port by issuing
 * a Warm reset if compliance mode is detected; otherwise the port becomes
 * "dead" (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * xhci spec), this quirk is needed on systems that have the failing hardware
 * installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	init_timer(&xhci->comp_mode_recovery_timer);

	xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
	xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	set_timer_slack(&xhci->comp_mode_recovery_timer,
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}
/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
}
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}
/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");
	return 0;
}
static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel(temp & ~STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
}
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

	/* Yet another workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
}
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}
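/*
 * Worked example (illustrative, assuming TRBS_PER_SEGMENT is 64): a
 * one-segment command ring leaves the reset above with num_trbs_free =
 * 1 * (64 - 1) - 1 = 62, since the last TRB of the segment is the link
 * TRB and one more slot stays unused so a full ring can be told apart
 * from an empty one (enqueue == dequeue would otherwise be ambiguous).
 */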
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(xhci, &xhci->op_regs->status,
			STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		if (xhci_handshake(xhci, &xhci->op_regs->status,
				STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel(temp & ~STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT,
			0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

 done:
	if (retval == 0) {
		usb_hcd_resume_root_hub(hcd);
		usb_hcd_resume_root_hub(xhci->shared_hcd);
	}

	/*
	 * If the system is subject to the quirk, the compliance mode timer
	 * must always be re-initialized after a system resume, since the
	 * ports can suffer the compliance mode issue again.  It doesn't
	 * matter whether any port had previously entered U0 before the
	 * system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/
/*
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
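/*
 * Worked example (illustrative): ep 1 IN (bEndpointAddress 0x81) yields
 * (1 * 2) + 1 - 1 = 2; ep 2 OUT (0x02) yields (2 * 2) + 0 - 1 = 3; the
 * default control endpoint 0 yields (0 * 2) = 0.
 */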
/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the XHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
	return direction | number;
}
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
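/*
 * Worked example (illustrative): ep 1 IN has endpoint index 2, so its
 * control context flag is 1 << (2 + 1) = 0x8; bit 0 stays the slot
 * context flag and bit 1 the default control endpoint flag.
 */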
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and "
					"virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			return -ENOMEM;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
			size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
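/*
 * Worked example (illustrative): an endpoint whose stream_info was set up
 * with num_streams = 4 returns a ring for URB stream IDs 1..3; stream ID 0
 * and any ID >= 4 hit the warnings above and yield NULL.
 */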
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are a few options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = readl(&xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HW died, freeing TD.");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
			cpu_to_le32(EP_STATE_DISABLED)) ||
			le32_to_cpu(ctrl_ctx->drop_flags) &
			xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
			LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
			xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
			LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
				"configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful Endpoint Configure command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
*xhci
,
1843 struct usb_device
*udev
, u32
*cmd_status
)
1846 struct xhci_virt_device
*virt_dev
= xhci
->devs
[udev
->slot_id
];
1848 switch (*cmd_status
) {
1850 dev_warn(&udev
->dev
, "WARN: xHCI driver setup invalid evaluate "
1851 "context command.\n");
1855 dev_warn(&udev
->dev
, "WARN: slot not enabled for"
1856 "evaluate context command.\n");
1859 case COMP_CTX_STATE
:
1860 dev_warn(&udev
->dev
, "WARN: invalid context state for "
1861 "evaluate context command.\n");
1862 xhci_dbg_ctx(xhci
, virt_dev
->out_ctx
, 1);
1866 dev_warn(&udev
->dev
, "ERROR: Incompatible device for evaluate "
1867 "context command.\n");
1871 /* Max Exit Latency too large error */
1872 dev_warn(&udev
->dev
, "WARN: Max Exit Latency too large\n");
1876 xhci_dbg_trace(xhci
, trace_xhci_dbg_context_change
,
1877 "Successful evaluate context command");
1881 xhci_err(xhci
, "ERROR: unexpected command completion "
1882 "code 0x%x.\n", *cmd_status
);
static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
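/*
 * Worked example (illustrative): add_flags = 0b11000 (endpoints at context
 * indices 3 and 4 added) and drop_flags = 0b01000 (index 3 also dropped,
 * i.e. a changed endpoint) shift down to valid_add_flags = 0b110 and
 * valid_drop_flags = 0b010, so hweight32(0b110) - hweight32(0b010) =
 * 2 - 1 = 1 genuinely new endpoint.
 */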
static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding %u ep ctxs, %u now active.", added_eps,
			xhci->num_active_eps);
	return 0;
}
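/*
 * Worked example (illustrative): with limit_active_eps = 64 and
 * num_active_eps = 60, a command adding 5 new endpoints is rejected here
 * (60 + 5 > 64) even if it also drops 10, because the drops only take
 * effect once the command actually succeeds.
 */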
/*
 * The configure endpoint was failed by the xHC for some other reason, so we
 * need to revert the resources that failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Removing %u failed ep ctxs, %u now active.",
			num_failed_eps,
			xhci->num_active_eps);
}
/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Removing %u dropped ep ctxs, %u now active.",
				num_dropped_eps,
				xhci->num_active_eps);
}
static unsigned int xhci_get_block_size(struct usb_device *udev)
{
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
	default:
		/* Should never happen */
		return 1;
	}
}
static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return interval_bw->overhead[LS_OVERHEAD_TYPE];
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return interval_bw->overhead[FS_OVERHEAD_TYPE];
	return interval_bw->overhead[HS_OVERHEAD_TYPE];
}
/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added.  Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}
static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}
/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling.  This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP complete problem to find the minimum worst
 * case scenario.  Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * overestimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only one
 * possible way to schedule packets for that interval.  In order to simplify
 * this algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, we obviously must schedule all packets for each interval.
 * The bandwidth for interval 0 is just the amount of data to be transmitted
 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
 * the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in.  For this algorithm, if we can schedule the same number of packets for
 * each possible scheduling opportunity (each microframe), we will do so.  The
 * remaining number of packets will be saved to be transmitted in the gaps in
 * the next interval's scheduling sequence.
 *
 * As we move those remaining packets to be scheduled with interval 2 packets,
 * we have to double the number of remaining packets to transmit.  This is
 * because the intervals are actually powers of 2, and we would be transmitting
 * the previous interval's packets twice in this interval.  We also have to be
 * sure that when we look at the largest max packet size for this interval, we
 * also look at the largest max packet size for the remaining packets and take
 * the greater of the two.
 *
 * The algorithm continues to evenly distribute packets in each scheduling
 * opportunity, and push the remaining packets out, until we get to the last
 * interval.  Then those packets and their associated overhead are just added
 * to the bandwidth used.
 */
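/*
 * A short worked example of the loop below (illustrative numbers, measured in
 * bandwidth blocks): suppose interval 1 holds 3 packets with largest max
 * packet size 10 and overhead 5, and interval 2 holds 2 packets with max
 * packet size 8 and the same overhead 5.
 *
 * i = 1: packets_remaining = 2 * 0 + 3 = 3; only 3 >> 2 = 0 packets can be
 *        scheduled evenly across the 4 opportunities, so all 3 carry over,
 *        along with packet_size = 10 and overhead = 5.
 * i = 2: packets_remaining = 2 * 3 + 2 = 8; 8 >> 3 = 1 packet fits in each
 *        of the 8 opportunities, adding 1 * (5 + 10) = 15 blocks to bw_used,
 *        and 8 % 8 = 0 packets remain, so packet_size and overhead reset to 0.
 */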
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed == USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for TT slot %u port %u",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/* How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used for those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* What largest max packet size should those packets have? */
		/* If we've transmitted all packets, don't carry over the
		 * largest packet size.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			/* Otherwise if we do have remaining packets, and we've
			 * scheduled some packets in this interval, take the
			 * largest max packet size from endpoints with this
			 * interval.
			 */
			packet_size = largest_mps;
			overhead = interval_overhead;
		}
		/* Otherwise carry over packet_size and overhead from the last
		 * time we had a remainder.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. "
					"Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}
	/*
	 * Ok, we know we have some packets left over after even-handedly
	 * scheduling interval 15.  We don't know which microframes they will
	 * fit into, so we over-schedule and say they will be scheduled every
	 * microframe.
	 */
	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		unsigned int port_index = virt_dev->real_port - 1;

		/* OK, we're manipulating a HS device attached to a
		 * root port bandwidth domain.  Include the number of active TTs
		 * in the bandwidth used.
		 */
		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[port_index].num_active_tts;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Final bandwidth: %u, Limit: %u, Reserved: %u, "
			"Available: %u percent",
			bw_used, max_bandwidth, bw_reserved,
			(max_bandwidth - bw_used - bw_reserved) * 100 /
			max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}
static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP);
}
static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}
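
/*
 * For example, bulk and control endpoints count as "async" under the test
 * above (they are neither isochronous nor interrupt types), so the interval
 * bandwidth table code below skips them entirely; only ISOC IN and INT IN
 * endpoints are treated as synchronous IN endpoints.
 */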
static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
					(SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);
}
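
/*
 * A worked example of the formula above, kept symbolic since the block and
 * overhead constants are defined in xhci.h: an IN endpoint with
 * ep_interval = 2, mult = 1, num_packets = 2 and max_packet_size = 1024
 * consumes
 *
 *   DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + DIV_ROUND_UP(1024, SS_BLOCK) +
 *		SS_OVERHEAD_BURST), 1 << 2)
 *
 * blocks, i.e. its per-service-interval cost averaged over 4 microframes.
 */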
void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* SuperSpeed endpoints never get added to intervals in the table, so
	 * this check is only valid for HS/FS/LS devices.
	 */
	if (list_empty(&virt_ep->bw_endpoint_list))
		return;
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}
	if (tt_info)
		tt_info->active_eps -= 1;
	list_del_init(&virt_ep->bw_endpoint_list);
}
static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	struct xhci_virt_ep *smaller_ep;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets += ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}

	if (tt_info)
		tt_info->active_eps += 1;
	/* Insert the endpoint into the list, largest max packet size first. */
	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
		if (ep_bw->max_packet_size >=
				smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
			list_add_tail(&virt_ep->bw_endpoint_list,
					&smaller_ep->bw_endpoint_list);
			return;
		}
	}
	/* Add the new endpoint at the end of the list. */
	list_add_tail(&virt_ep->bw_endpoint_list,
			&interval_bw->endpoints);
}
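
/*
 * Note that the insertion above keeps each interval's endpoint list sorted
 * by max packet size, largest first, which is why xhci_check_bw_table() can
 * read an interval's largest max packet size straight from the head of
 * interval_bw->endpoints.
 */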
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_root_port_bw_info *rh_bw_info;
	if (!virt_dev->tt_info)
		return;

	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
	if (old_active_eps == 0 &&
				virt_dev->tt_info->active_eps != 0) {
		rh_bw_info->num_active_tts += 1;
		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
	} else if (old_active_eps != 0 &&
				virt_dev->tt_info->active_eps == 0) {
		rh_bw_info->num_active_tts -= 1;
		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
	}
}
static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_bw_info ep_bw_info[31];
	int i;
	struct xhci_input_control_ctx *ctrl_ctx;
	int old_active_eps = 0;

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Make a copy of the BW info in case we need to revert this */
		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
				sizeof(ep_bw_info[i]));
		/* Drop the endpoint from the interval table if the endpoint is
		 * being dropped or changed.
		 */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	/* Overwrite the information stored in the endpoints' bw_info */
	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
	for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
		if (EP_IS_ADDED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}

	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* Ok, this fits in the bandwidth we have.
		 * Update the number of active TTs.
		 */
		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
		return 0;
	}

	/* We don't have enough bandwidth for this, revert the stored info. */
	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
		if (EP_IS_ADDED(ctrl_ctx, i)) {
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		}
		/* Revert the endpoint back to its old information */
		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
				sizeof(ep_bw_info[i]));
		/* Add any changed or dropped endpoints back into the table */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	return -ENOMEM;
}
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct completion *cmd_completion;
	u32 *cmd_status;
	struct xhci_virt_device *virt_dev;
	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];

	if (command)
		in_ctx = command->in_ctx;
	else
		in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
			xhci_reserve_host_resources(xhci, ctrl_ctx)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, "
				"active endpoint contexts = %u\n",
				xhci->num_active_eps);
		return -ENOMEM;
	}
	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough bandwidth\n");
		return -ENOMEM;
	}

	if (command) {
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"FIXME allocate a new ring segment");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* cancel the configure endpoint command */
		ret = xhci_cancel_cmd(xhci, command, cmd_trb);
		if (ret < 0)
			return ret;
		return -ETIME;
	}

	if (!ctx_change)
		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
	else
		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, ctrl_ctx);
		else
			xhci_finish_resource_reservation(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}
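
/*
 * Note that xhci_configure_endpoint() sleeps waiting on the command
 * completion, so callers must be in process context and must not hold
 * xhci->lock; the function takes and drops the lock itself around the
 * queueing and resource-accounting steps.
 */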
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Don't issue the command if there's no endpoints to update. */
	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
			ctrl_ctx->drop_flags == 0)
		return 0;

	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret)
		/* Callee should call reset_bandwidth() */
		return ret;

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		u32 add_flags, u32 drop_flags)
{
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	in_ctx = xhci->devs[slot_id]->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ctrl_ctx,
			added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
			"Cleaning up stalled endpoint ring");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
				"Queueing new dequeue state");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Setting up input context for "
				"configure endpoint command");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint()).
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
				"Endpoint 0x%x not halted, refusing to reset.",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
				"Control endpoint stall already handled.");
		return;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
			"Queueing reset endpoint command");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers. xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}
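
/*
 * For example, a request for 13 stream IDs (including stream 0) is rounded
 * up to roundup_pow_of_two(13) = 16 stream context entries; if HCC_MAX_PSA
 * reported that the host supports only 8 primary stream array entries, both
 * the context count and the stream ID count would be clamped to 8.
 */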
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}

		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}
/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
 * coordinate mass storage command queueing across multiple endpoints (basically
 * a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array. FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
				udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal endpoint ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}
/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -EINVAL;
	}

	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal endpoint ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}
/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}
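
/*
 * For example, if endpoint contexts 2 and 5 still had rings installed, the
 * loop above computes drop_flags = (1 << 2) | (1 << 5) = 0x24 and
 * num_dropped_eps = 2 for the debug message.
 */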
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
						SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);

	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_dbg(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			last_freed_endpoint = i;
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
	}
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);

	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i, ret;

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * We called pm_runtime_get_noresume when the device was attached.
	 * Decrement the counter here to allow controller to runtime suspend
	 * if no devices remain.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_put_noidle(hcd->self.controller);
#endif

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free the
	 * device.
	 */
	if (ret <= 0 && ret != -ENODEV)
		return;

	virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = readl(&xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.  XXX Can free sleep?
	 */
}
/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding 1 ep ctx, %u now active.",
			xhci->num_active_eps);
	return 0;
}
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;
	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* cancel the enable slot request */
		return xhci_cancel_cmd(xhci, NULL, cmd_trb);
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	udev->slot_id = xhci->slot_id;

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * If resetting upon resume, we can't put the controller into runtime
	 * suspend if there is a device attached.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_get_noresume(hcd->self.controller);
#endif

	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;

disable_slot:
	/* Disable slot, if we can do it without mem alloc */
	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
		xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
/*
 * Issue an Address Device command and optionally send a corresponding
 * SetAddress request to the device.
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init,
 * so we should only issue and wait on one address command at the same time.
 */
static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
			     enum xhci_setup_dev setup)
{
	const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;
	union xhci_trb *cmd_trb;

	if (!udev->slot_id) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"Bad Slot ID %d", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture test with an NEC controller,
		 * a zero-dereference was observed once due to virt_dev = 0.
		 * Print useful debug rather than crash if it is observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
				udev->slot_id);
		return -EINVAL;
	}

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -EINVAL;
	}
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);

	spin_lock_irqsave(&xhci->lock, flags);
	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
					udev->slot_id, setup);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"FIXME: allocate a command ring segment");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			XHCI_CMD_DEFAULT_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting
	 * the command on a timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for setup %s command\n",
				timeleft == 0 ? "Timeout" : "Signal", act);
		/* cancel the address device command */
		ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
		if (ret < 0)
			return ret;
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
				act, udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
		ret = -EPROTO;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
				"ERROR: Incompatible device for setup %s command\n", act);
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"Successful setup %s command", act);
		break;
	default:
		xhci_err(xhci,
			"ERROR: unexpected setup %s command completion code 0x%x.\n",
				act, virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Op regs DCBAA ptr = %#016llx", temp_64);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
		"Slot ID %d dcbaa entry @%p = %#016llx",
		udev->slot_id,
		&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
		(unsigned long long)
		le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Output Context DMA address = %#08llx",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);
	/* Zero the input context control for later use */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Internal device address = %d",
			le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);

	return 0;
}
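/*
 * Note on the 'setup' argument (summarizing the xHCI Address Device
 * command): SETUP_CONTEXT_ONLY issues the command with the Block Set
 * Address Request (BSR) flag set, so the slot context is initialized
 * without assigning a USB address on the wire; SETUP_CONTEXT_ADDRESS
 * clears BSR and lets the hardware also perform the SET_ADDRESS.
 */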
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
}

int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
}
/*
 * Transfer the port index into the real index in the HW port status
 * registers. Calculate the offset between the port's PORTSC register
 * and the port status base, then divide by the number of per-port
 * registers to get the real index. Raw port numbers are 1-based.
 */
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	__le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
	__le32 __iomem *addr;
	int raw_port;

	if (hcd->speed != HCD_USB3)
		addr = xhci->usb2_ports[port1 - 1];
	else
		addr = xhci->usb3_ports[port1 - 1];

	raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
	return raw_port;
}
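/*
 * Worked example (illustrative): each port owns NUM_PORT_REGS (4)
 * registers -- PORTSC, PORTPMSC, PORTLI and PORTHLPMC.  A PORTSC pointer
 * sitting eight 32-bit registers past port_status_base therefore yields
 * 8 / NUM_PORT_REGS + 1 = raw port 3.
 */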
3914 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
3915 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
3917 static int __maybe_unused
xhci_change_max_exit_latency(struct xhci_hcd
*xhci
,
3918 struct usb_device
*udev
, u16 max_exit_latency
)
3920 struct xhci_virt_device
*virt_dev
;
3921 struct xhci_command
*command
;
3922 struct xhci_input_control_ctx
*ctrl_ctx
;
3923 struct xhci_slot_ctx
*slot_ctx
;
3924 unsigned long flags
;
3927 spin_lock_irqsave(&xhci
->lock
, flags
);
3928 if (max_exit_latency
== xhci
->devs
[udev
->slot_id
]->current_mel
) {
3929 spin_unlock_irqrestore(&xhci
->lock
, flags
);
3933 /* Attempt to issue an Evaluate Context command to change the MEL. */
3934 virt_dev
= xhci
->devs
[udev
->slot_id
];
3935 command
= xhci
->lpm_command
;
3936 ctrl_ctx
= xhci_get_input_control_ctx(xhci
, command
->in_ctx
);
3938 spin_unlock_irqrestore(&xhci
->lock
, flags
);
3939 xhci_warn(xhci
, "%s: Could not get input context, bad type.\n",
3944 xhci_slot_copy(xhci
, command
->in_ctx
, virt_dev
->out_ctx
);
3945 spin_unlock_irqrestore(&xhci
->lock
, flags
);
3947 ctrl_ctx
->add_flags
|= cpu_to_le32(SLOT_FLAG
);
3948 slot_ctx
= xhci_get_slot_ctx(xhci
, command
->in_ctx
);
3949 slot_ctx
->dev_info2
&= cpu_to_le32(~((u32
) MAX_EXIT
));
3950 slot_ctx
->dev_info2
|= cpu_to_le32(max_exit_latency
);
3952 xhci_dbg_trace(xhci
, trace_xhci_dbg_context_change
,
3953 "Set up evaluate context for LPM MEL change.");
3954 xhci_dbg(xhci
, "Slot %u Input Context:\n", udev
->slot_id
);
3955 xhci_dbg_ctx(xhci
, command
->in_ctx
, 0);
3957 /* Issue and wait for the evaluate context command. */
3958 ret
= xhci_configure_endpoint(xhci
, udev
, command
,
3960 xhci_dbg(xhci
, "Slot %u Output Context:\n", udev
->slot_id
);
3961 xhci_dbg_ctx(xhci
, virt_dev
->out_ctx
, 0);
3964 spin_lock_irqsave(&xhci
->lock
, flags
);
3965 virt_dev
->current_mel
= max_exit_latency
;
3966 spin_unlock_irqrestore(&xhci
->lock
, flags
);
#ifdef CONFIG_PM_RUNTIME

/* BESL to HIRD Encoding array for USB2 LPM */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};

/* Calculate HIRD/BESL for USB2 PORTPMSC */
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
					struct usb_device *udev)
{
	int u2del, besl, besl_host;
	int besl_device = 0;
	u32 field;

	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	if (field & USB_BESL_SUPPORT) {
		for (besl_host = 0; besl_host < 16; besl_host++) {
			if (xhci_besl_encoding[besl_host] >= u2del)
				break;
		}
		/* Use baseline BESL value as default */
		if (field & USB_BESL_BASELINE_VALID)
			besl_device = USB_GET_BESL_BASELINE(field);
		else if (field & USB_BESL_DEEP_VALID)
			besl_device = USB_GET_BESL_DEEP(field);
	} else {
		if (u2del <= 50)
			besl_host = 0;
		else
			besl_host = (u2del - 51) / 75 + 1;
	}

	besl = besl_host + besl_device;
	if (besl > 15)
		besl = 15;

	return besl;
}
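/*
 * Worked example (illustrative): with a host U2 exit latency u2del of
 * 400us and a BESL-capable device that advertises neither a baseline nor
 * a deep BESL, the loop stops at besl_host = 4, since
 * xhci_besl_encoding[4] == 400; besl_device stays 0, so the programmed
 * BESL is 4.
 */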
/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
{
	u32 field;
	int l1;
	int besld = 0;
	int hirdm = 0;

	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
	l1 = udev->l1_params.timeout / 256;

	/* device has preferred BESLD */
	if (field & USB_BESL_DEEP_VALID) {
		besld = USB_GET_BESL_DEEP(field);
		hirdm = 1;
	}

	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
}
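/*
 * Worked example (illustrative, assuming the default XHCI_L1_TIMEOUT of
 * 512us): the L1 Timeout field becomes 512 / 256 = 2, and a device
 * advertising a deep BESL of 5 yields
 * PORT_BESLD(5) | PORT_L1_TIMEOUT(2) | PORT_HIRDM(1).
 */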
int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	__le32 __iomem	**port_array;
	__le32 __iomem	*pm_addr, *hlpm_addr;
	u32		pm_val, hlpm_val, field;
	unsigned int	port_num;
	unsigned long	flags;
	int		hird, exit_latency;
	int		ret;

	if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
			!udev->lpm_capable)
		return -EPERM;

	/* So far we only support LPM for non-hub devices connected directly
	 * to the root hub.
	 */
	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EPERM;

	if (udev->usb2_hw_lpm_capable != 1)
		return -EPERM;

	spin_lock_irqsave(&xhci->lock, flags);

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;
	pm_addr = port_array[port_num] + PORTPMSC;
	pm_val = readl(pm_addr);
	hlpm_addr = port_array[port_num] + PORTHLPMC;
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num);

	if (enable) {
		/* Host supports BESL timeout instead of HIRD */
		if (udev->usb2_hw_lpm_besl_capable) {
			/* If the device doesn't have a preferred BESL value,
			 * use a default one that works with mixed HIRD and
			 * BESL systems. See the XHCI_DEFAULT_BESL definition
			 * in xhci.h.
			 */
			if ((field & USB_BESL_SUPPORT) &&
			    (field & USB_BESL_BASELINE_VALID))
				hird = USB_GET_BESL_BASELINE(field);
			else
				hird = udev->l1_params.besl;

			exit_latency = xhci_besl_encoding[hird];
			spin_unlock_irqrestore(&xhci->lock, flags);

			/* The USB 3.0 code dedicates one
			 * xhci->lpm_command->in_ctx input context for link
			 * power management evaluate context commands. It is
			 * protected by hcd->bandwidth mutex and is shared by
			 * all devices. We need to set the max ext latency in
			 * USB 2 BESL LPM as well, so use the same mutex and
			 * xhci_change_max_exit_latency().
			 */
			mutex_lock(hcd->bandwidth_mutex);
			ret = xhci_change_max_exit_latency(xhci, udev,
							   exit_latency);
			mutex_unlock(hcd->bandwidth_mutex);

			if (ret < 0)
				return ret;
			spin_lock_irqsave(&xhci->lock, flags);

			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
			writel(hlpm_val, hlpm_addr);
			/* flush write */
			readl(hlpm_addr);
		} else {
			hird = xhci_calculate_hird_besl(xhci, udev);
		}

		pm_val &= ~PORT_HIRD_MASK;
		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
		writel(pm_val, pm_addr);
		pm_val = readl(pm_addr);
		pm_val |= PORT_HLE;
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
	} else {
		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
		if (udev->usb2_hw_lpm_besl_capable) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			mutex_lock(hcd->bandwidth_mutex);
			xhci_change_max_exit_latency(xhci, udev, 0);
			mutex_unlock(hcd->bandwidth_mutex);
			return 0;
		}
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
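/*
 * Sketch of the resulting PORTPMSC programming (descriptive, assuming the
 * field macros in xhci.h): for slot ID 5 with hird = 4, the first write
 * sets PORT_HIRD(4) | PORT_RWE | PORT_L1DS(5), i.e. the BESL/HIRD value,
 * remote wake enable and the L1 device slot; a second read-modify-write
 * then sets PORT_HLE to switch the port to hardware-controlled LPM.
 */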
/* Check whether a USB2 port supports a given extended capability protocol.
 * Only USB2 ports' extended protocol capability values are cached.
 * Return 1 if the capability is supported.
 */
static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
					   unsigned capability)
{
	u32 port_offset, port_count;
	int i;

	for (i = 0; i < xhci->num_ext_caps; i++) {
		if (xhci->ext_caps[i] & capability) {
			/* port offsets starts at 1 */
			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
			if (port >= port_offset &&
			    port < port_offset + port_count)
				return 1;
		}
	}
	return 0;
}
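/*
 * Worked example (illustrative): if a cached Supported Protocol capability
 * advertises a compatible port offset of 1 and a port count of 4, then
 * port_offset becomes 0 after the -1 adjustment and zero-based ports 0..3
 * report the capability as supported.
 */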
int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	int		portnum = udev->portnum - 1;

	if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
			!udev->lpm_capable)
		return 0;

	/* So far we only support LPM for non-hub devices connected directly
	 * to the root hub.
	 */
	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return 0;

	if (xhci->hw_lpm_support == 1 &&
			xhci_check_usb2_port_capability(
				xhci, portnum, XHCI_HLC)) {
		udev->usb2_hw_lpm_capable = 1;
		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
		udev->l1_params.besl = XHCI_DEFAULT_BESL;
		if (xhci_check_usb2_port_capability(xhci, portnum,
					XHCI_BLC))
			udev->usb2_hw_lpm_besl_capable = 1;
	}

	return 0;
}
#else

int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
				struct usb_device *udev, int enable)
{
	return 0;
}

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

#endif /* CONFIG_PM_RUNTIME */
/*---------------------- USB 3.0 Link PM functions ------------------------*/

#ifdef CONFIG_PM
/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
static unsigned long long xhci_service_interval_to_ns(
		struct usb_endpoint_descriptor *desc)
{
	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
}
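/*
 * Worked example (illustrative): a periodic endpoint with bInterval = 4
 * has a service interval of 2^(4-1) * 125us = 1ms, which this helper
 * returns as 1,000,000 ns.
 */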
static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
		enum usb3_link_state state)
{
	unsigned long long sel;
	unsigned long long pel;
	unsigned int max_sel_pel;
	char *state_name;

	switch (state) {
	case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
		state_name = "U1";
		break;
	case USB3_LPM_U2:
		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
		state_name = "U2";
		break;
	default:
		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
				__func__);
		return USB3_LPM_DISABLED;
	}

	if (sel <= max_sel_pel && pel <= max_sel_pel)
		return USB3_LPM_DEVICE_INITIATED;

	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long SEL %llu us\n",
				state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long PEL %llu us\n",
				state_name, pel);
	return USB3_LPM_DISABLED;
}
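/*
 * Worked example (illustrative): a device reporting u1_params.sel = 250 ns
 * rounds up to sel = 1 us here; device-initiated U1 stays enabled as long
 * as both the rounded SEL and PEL fit under the spec maximum for that
 * state.
 */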
/* Returns the hub-encoded U1 timeout value.
 * The U1 timeout should be the maximum of the following values:
 *  - For control endpoints, U1 system exit latency (SEL) * 3
 *  - For bulk endpoints, U1 SEL * 5
 *  - For interrupt endpoints:
 *    - Notification EPs, U1 SEL * 3
 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
 */
static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	int ep_type;
	int intr_type;

	ep_type = usb_endpoint_type(desc);
	switch (ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		timeout_ns = udev->u1_params.sel * 3;
		break;
	case USB_ENDPOINT_XFER_BULK:
		timeout_ns = udev->u1_params.sel * 5;
		break;
	case USB_ENDPOINT_XFER_INT:
		intr_type = usb_endpoint_interrupt_type(desc);
		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
			timeout_ns = udev->u1_params.sel * 3;
			break;
		}
		/* Otherwise the calculation is the same as isoc eps */
	case USB_ENDPOINT_XFER_ISOC:
		timeout_ns = xhci_service_interval_to_ns(desc);
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
		if (timeout_ns < udev->u1_params.sel * 2)
			timeout_ns = udev->u1_params.sel * 2;
		break;
	default:
		return 0;
	}

	/* The U1 timeout is encoded in 1us intervals. */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
	/* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */
	if (timeout_ns == USB3_LPM_DISABLED)
		timeout_ns++;

	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U1.
	 */
	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
			"due to long timeout %llu us\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}
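/*
 * Worked example (illustrative): a bulk endpoint on a device with
 * u1_params.sel = 400 ns gets timeout_ns = 5 * 400 = 2000 ns, which
 * encodes to DIV_ROUND_UP(2000, 1000) = 2 in the hub's 1us-granularity
 * U1 timeout field.
 */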
/* Returns the hub-encoded U2 timeout value.
 * The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
 *  - largest bInterval of any active periodic endpoint (to avoid going
 *    into lower power link states between intervals).
 *  - the U2 Exit Latency of the device
 */
static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	unsigned long long u2_del_ns;

	timeout_ns = 10 * 1000 * 1000;

	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
			(xhci_service_interval_to_ns(desc) > timeout_ns))
		timeout_ns = xhci_service_interval_to_ns(desc);

	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
	if (u2_del_ns > timeout_ns)
		timeout_ns = u2_del_ns;

	/* The U2 timeout is encoded in 256us intervals */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U2.
	 */
	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
			"due to long timeout %llu (256us units)\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
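/*
 * Worked example (illustrative): with no periodic endpoint interval above
 * the 10 ms floor and a small bU2DevExitLat, timeout_ns stays at
 * 10,000,000 ns and encodes to DIV_ROUND_UP(10000000, 256000) = 40 in
 * 256us units.
 */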
static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	if (state == USB3_LPM_U1) {
		if (xhci->quirks & XHCI_INTEL_HOST)
			return xhci_calculate_intel_u1_timeout(udev, desc);
	} else {
		if (xhci->quirks & XHCI_INTEL_HOST)
			return xhci_calculate_intel_u2_timeout(udev, desc);
	}

	return USB3_LPM_DISABLED;
}
static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	u16 alt_timeout;

	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
		desc, state, timeout);

	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, just stop searching.
	 */
	if (alt_timeout == USB3_LPM_DISABLED ||
			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
		*timeout = alt_timeout;
		return -E2BIG;
	}
	if (alt_timeout > *timeout)
		*timeout = alt_timeout;
	return 0;
}
static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,
		enum usb3_link_state state,
		u16 *timeout)
{
	int j;

	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
					&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
	}
	return 0;
}
static int xhci_check_intel_tier_policy(struct usb_device *udev,
		enum usb3_link_state state)
{
	struct usb_device *parent;
	unsigned int num_hubs;

	if (state == USB3_LPM_U2)
		return 0;

	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
	for (parent = udev->parent, num_hubs = 0; parent->parent;
			parent = parent->parent)
		num_hubs++;

	if (num_hubs < 2)
		return 0;

	dev_dbg(&udev->dev, "Disabling U1 link state for device"
			" below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub "
			"to decrease power consumption.\n");
	return -E2BIG;
}
static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (xhci->quirks & XHCI_INTEL_HOST)
		return xhci_check_intel_tier_policy(udev, state);
	return -EINVAL;
}
/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
						"at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}
static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	/* We need to figure out if we're disabling or enabling U1/U2. */
	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;
	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
				"is too big.\n", mel_us);
		return -E2BIG;
	}
	return mel_us;
}
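/*
 * Worked example (illustrative): enabling U2 while U1 is already enabled,
 * with u1_params.mel = 300 ns and u2_params.mel = 2000 ns, gives
 * u1_mel_us = 1 and u2_mel_us = 2 after rounding up, so the slot context
 * Max Exit Latency becomes 2 us.
 */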
/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd	*xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}
int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd	*xhci;
	u16 mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return 0;
}
#else /* CONFIG_PM */

int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif	/* CONFIG_PM */
/*-------------------------------------------------------------------------*/

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * high-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
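/*
 * Worked example for the TT think time conversion above (illustrative):
 * the USB core stores think time in nanoseconds, where one unit of 8 FS
 * bit times is about 666 ns.  A hub reporting 16 FS bit times is stored
 * as 1332 ns, and (1332 / 666) - 1 = 1 is what lands in the slot
 * context's TT Think Time field.
 */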
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why? */
	return readl(&xhci->run_regs->microframe_index) >> 3;
}
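/*
 * MFINDEX counts 125us microframes; shifting right by three divides by 8
 * to produce the 1ms frame number the USB core expects.  For example
 * (purely illustrative), a raw MFINDEX of 80 microframes reads back as
 * frame 10.
 */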
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd		*xhci;
	struct device		*dev = hcd->self.controller;
	int			retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* XHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
		if (!xhci)
			return -ENOMEM;
		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		xhci = hcd_to_xhci(hcd);
		/*
		 * Support arbitrarily aligned sg-list entries on hosts without
		 * TD fragment rules (which are currently unsupported).
		 */
		if (xhci->hci_version < 0x100)
			hcd->self.no_sg_constraint = 1;

		return 0;
	}

	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	xhci->quirks = quirks;

	get_quirks(dev, xhci);

	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk ignores such
	 * spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	if (xhci->hci_version < 0x100)
		hcd->self.no_sg_constraint = 1;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		goto error;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Reset complete\n");

	/* Set dma_mask and coherent_dma_mask to 64-bits,
	 * if xHC supports 64-bit addressing */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Called HCD init\n");
	return 0;
error:
	kfree(xhci);
	return retval;
}
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		pr_debug("Problem registering PCI driver.\n");
		return retval;
	}
	retval = xhci_register_plat();
	if (retval < 0) {
		pr_debug("Problem registering platform driver.\n");
		goto unreg_pci;
	}
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	return 0;
unreg_pci:
	xhci_unregister_pci();
	return retval;
}
module_init(xhci_hcd_init);
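/*
 * The size checks above encode "fields * bits / 8": e.g. the slot context
 * is eight 32-bit fields, so 8*32/8 = 32 bytes, and the run registers
 * embed 128 interrupter register sets after eight 32-bit fields.
 */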
static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
	xhci_unregister_plat();
}
module_exit(xhci_hcd_cleanup);