/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = xhci_handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free all IRQs requested so far.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   with the max number of interrupters based on the xhci HCSPARAMS1.
	 * - num_online_cpus: maximum MSI-X vectors per CPU core.
	 *   Add one more vector to ensure an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
				GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

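/*
 * Worked example for the msix_count calculation above (illustrative, not
 * from the original source): on a 4-core system whose HCSPARAMS1 advertises
 * 8 interrupters, msix_count = min(4 + 1, 8) = 5, so five MSI-X vectors are
 * requested.  On a 16-core system with the same host, the HCSPARAMS1 limit
 * wins instead and msix_count = min(16 + 1, 8) = 8.
 */
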
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

legacy_irq:
	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = xhci_readl(xhci, xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg(xhci, "Compliance mode detected->port %d\n",
					i + 1);
			xhci_dbg(xhci, "Attempting compliance mode recovery\n");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 * re-driver, which sometimes causes ports behind that hardware to enter
 * compliance mode.  The quirk creates a timer that polls the link state of
 * each host controller's port every 2 seconds and recovers the port by
 * issuing a Warm Reset if compliance mode is detected; otherwise the port
 * becomes "dead" (no device connections or disconnections will be detected
 * anymore).  Because no status event is generated when entering compliance
 * mode (per xhci spec), this quirk is needed on systems that have the
 * failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	init_timer(&xhci->comp_mode_recovery_timer);

	xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
	xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	set_timer_slack(&xhci->comp_mode_recovery_timer,
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg(xhci, "Compliance mode recovery timer initialized\n");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
}

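/*
 * Illustrative note (not from the original source): port_status_u0 is a
 * bitmask with one bit per USB3 port.  With num_usb3_ports = 4, the
 * "all ports seen U0" value is (1 << 4) - 1 = 0xf, so the quirk logic stops
 * re-arming the recovery timer only once every port has been observed in U0
 * at least once.
 */
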
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	/* Initialize Compliance Mode Recovery Data if needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				xhci->cmd_ring->dequeue) &
		(u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

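/*
 * Illustrative arithmetic for the alignment constraint above (not from the
 * original source): TRBs are 16 bytes, but the command ring dequeue pointer
 * register reserves its low 6 bits for flags (CMD_RING_RSVD_BITS), so only
 * 64-byte-aligned addresses, i.e. every fourth TRB, can be programmed.
 * Restarting from a zeroed ring at the segment start sidesteps that
 * restriction entirely.
 */
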
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (xhci_handshake(xhci, &xhci->op_regs->status,
			STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (xhci_handshake(xhci, &xhci->op_regs->status,
				STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT,
			0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

done:
	if (retval == 0) {
		usb_hcd_resume_root_hub(hcd);
		usb_hcd_resume_root_hub(xhci->shared_hcd);
	}

	/*
	 * If the system is subject to the quirk, the compliance mode timer
	 * needs to be re-initialized after every system resume, since the
	 * ports can suffer the compliance mode issue again.  It doesn't
	 * matter whether the ports previously entered U0 before the system
	 * was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}

#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

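/*
 * Worked example (illustrative, not from the original source): bulk IN
 * endpoint 0x81 has epnum 1, so index = (1 * 2) + 1 - 1 = 2; bulk OUT
 * endpoint 0x02 has epnum 2, so index = (2 * 2) + 0 - 1 = 3.  The default
 * control endpoint 0 uses the control rule: index = 0 * 2 = 0.  The matching
 * control-context flag would then be 1 << (index + 1).
 */
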
/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the XHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
	return direction | number;
}

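/*
 * Worked example (illustrative, not from the original source): this inverts
 * xhci_get_endpoint_index().  For ep_index = 2, number = DIV_ROUND_UP(2, 2)
 * = 1 and the even index selects USB_DIR_IN, giving address 0x81; for
 * ep_index = 3, number = DIV_ROUND_UP(3, 2) = 2 and the odd index selects
 * USB_DIR_OUT, giving address 0x02.
 */
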
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and "
					"virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			return -ENOMEM;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

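/*
 * Illustrative scenario for xhci_check_maxpacket() (not from the original
 * source): a full-speed device is first addressed with an assumed ep0 max
 * packet size of 8, so the hardware's endpoint 0 context decodes to 8.  When
 * the device descriptor later reports bMaxPacketSize0 = 64, the two values
 * disagree, and an Evaluate Context command is issued with only EP0_FLAG set
 * in add_flags, updating just the default control endpoint.
 */
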
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

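/*
 * Illustrative note on the urb_priv sizing above (not from the original
 * source): an isochronous URB with number_of_packets = 8 allocates one
 * urb_priv with 8 td pointers plus a contiguous buffer of 8 xhci_td
 * structures (one TD per isochronous packet); every other transfer type
 * uses size = 1, i.e. a single TD for the whole URB.
 */
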
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

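/*
 * Illustrative example of the stream ID boundary check above (not from the
 * original source): an endpoint configured with num_streams = 4 advertises
 * valid stream IDs 1 through 3 in the warning text above; stream_id = 0 is
 * rejected earlier because stream ID 0 is reserved, and any stream_id >= 4
 * falls through to the out-of-range warning and returns NULL.
 */
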
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are a few options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb().
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx\n",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
			cpu_to_le32(EP_STATE_DISABLED)) ||
			le32_to_cpu(ctrl_ctx->drop_flags) &
			xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
			LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
			xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
			LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
	case COMP_2ND_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
				"configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
				"context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = ctrl_ctx->add_flags >> 2;
	valid_drop_flags = ctrl_ctx->drop_flags >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

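/*
 * Worked example (illustrative, not from the original source): if, after
 * the >> 2 shift, valid_add_flags = 0b0110 (two endpoints added) and
 * valid_drop_flags = 0b0010 (one of them also dropped, i.e. changed), the
 * result is hweight32(0b0110) - hweight32(0b0010) = 2 - 1 = 1 genuinely
 * new endpoint.
 */
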
static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = ctrl_ctx->add_flags >> 2;
	valid_drop_flags = ctrl_ctx->drop_flags >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.\n",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
			xhci->num_active_eps);
	return 0;
}

/*
 * The xHC failed the configure endpoint command for some other reason, so we
 * need to revert the resources that the failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
			num_failed_eps,
			xhci->num_active_eps);
}

/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
				num_dropped_eps,
				xhci->num_active_eps);
}

static unsigned int xhci_get_block_size(struct usb_device *udev)
{
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
	default:
		/* Should never happen */
		return 1;
	}
}

static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return FS_OVERHEAD;
	return HS_OVERHEAD;
}

/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added.  Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps)
		return 0;
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}

static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}
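/* Illustrative example (editorial, with assumed values since the constants
 * live in xhci.h): if SS_BW_RESERVED were 10 (percent) and SS_BW_LIMIT_IN
 * were 25000 blocks, bw_reserved = DIV_ROUND_UP(10 * 25000, 100) = 2500, so
 * any IN budget above 22500 blocks would fail the check with -ENOMEM.
 */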
/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling.  This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP complete problem to find the minimum worst
 * case scenario.  Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * over-estimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only one
 * possible way to schedule packets for that interval.  In order to simplify
 * this algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, we obviously must schedule all packets for each interval.
 * The bandwidth for interval 0 is just the amount of data to be transmitted
 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
 * the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in.  For this algorithm, if we can schedule the same number of packets for
 * each possible scheduling opportunity (each microframe), we will do so.  The
 * remaining number of packets will be saved to be transmitted in the gaps in
 * the next interval's scheduling sequence.
 *
 * As we move those remaining packets to be scheduled with interval 2 packets,
 * we have to double the number of remaining packets to transmit.  This is
 * because the intervals are actually powers of 2, and we would be transmitting
 * the previous interval's packets twice in this interval.  We also have to be
 * sure that when we look at the largest max packet size for this interval, we
 * also look at the largest max packet size for the remaining packets and take
 * the greater of the two.
 *
 * The algorithm continues to evenly distribute packets in each scheduling
 * opportunity, and push the remaining packets out, until we get to the last
 * interval.  Then those packets and their associated overhead are just added
 * to the bandwidth used.
 */
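/* Illustrative trace of the above (editorial, not from the original source):
 * say interval 1 carries 5 packets and interval 2 carries 1.  At i = 1 there
 * are 1 << 2 = 4 scheduling opportunities, so 5 >> 2 = 1 packet is scheduled
 * in each and 5 % 4 = 1 packet remains.  At i = 2 the remainder doubles and
 * the new packets are added: 2 * 1 + 1 = 3 packets for 1 << 3 = 8
 * opportunities, so 3 >> 3 = 0 are evenly scheduled and all 3 carry over,
 * along with the largest packet size and overhead seen so far.
 */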
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed == USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/* How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used for those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* What largest max packet size should those packets have? */
		/* If we've transmitted all packets, don't carry over the
		 * largest packet size.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			/* Otherwise if we do have remaining packets, and we've
			 * scheduled some packets in this interval, take the
			 * largest max packet size from endpoints with this
			 * interval.
			 */
			packet_size = largest_mps;
			overhead = interval_overhead;
		}
		/* Otherwise carry over packet_size and overhead from the last
		 * time we had a remainder.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. "
					"Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}
	/*
	 * Ok, we know we have some packets left over after even-handedly
	 * scheduling interval 15.  We don't know which microframes they will
	 * fit into, so we over-schedule and say they will be scheduled every
	 * microframe.
	 */
	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		unsigned int port_index = virt_dev->real_port - 1;

		/* OK, we're manipulating a HS device attached to a
		 * root port bandwidth domain.  Include the number of active TTs
		 * in the bandwidth used.
		 */
		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[port_index].num_active_tts;
	}

	xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
		"Available: %u percent\n",
		bw_used, max_bandwidth, bw_reserved,
		(max_bandwidth - bw_used - bw_reserved) * 100 /
		max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}
static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP);
}
static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}
static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
					(SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);
}
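/* Illustrative example (editorial, with assumed constants): for
 * max_packet_size = 1024 and an SS_BLOCK of 16, mps = 64 blocks.  With
 * mult = 1, num_packets = 2 and ep_interval = 3, the result is
 * DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + 64 + SS_OVERHEAD_BURST), 1 << 3),
 * i.e. the per-service-interval cost amortized over a 2^3 microframe period.
 */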
void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* SuperSpeed endpoints never get added to intervals in the table, so
	 * this check is only valid for HS/FS/LS devices.
	 */
	if (list_empty(&virt_ep->bw_endpoint_list))
		return;
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}
	if (tt_info)
		tt_info->active_eps -= 1;
	list_del_init(&virt_ep->bw_endpoint_list);
}
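/* Editorial note on the interval translation above: ep_interval is a power
 * of two expressed in microframes for every speed.  The HS table is indexed
 * by microframe interval directly, while the FS/LS table is indexed in
 * frames, so subtracting 3 turns 2^ep_interval microframes into
 * 2^(ep_interval - 3) frames; e.g. ep_interval = 3 (8 microframes = 1 frame)
 * normalizes to index 0.
 */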
static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	struct xhci_virt_ep *smaller_ep;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets += ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}

	if (tt_info)
		tt_info->active_eps += 1;
	/* Insert the endpoint into the list, largest max packet size first. */
	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
		if (ep_bw->max_packet_size >=
				smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
			list_add_tail(&virt_ep->bw_endpoint_list,
					&smaller_ep->bw_endpoint_list);
			return;
		}
	}
	/* Add the new endpoint at the end of the list. */
	list_add_tail(&virt_ep->bw_endpoint_list,
			&interval_bw->endpoints);
}
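/* Editorial note: keeping each interval's endpoint list sorted by descending
 * max packet size lets xhci_check_bw_table() read the largest packet for an
 * interval from the first list entry alone, instead of walking the whole
 * list on every bandwidth recalculation.
 */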
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_root_port_bw_info *rh_bw_info;
	if (!virt_dev->tt_info)
		return;

	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
	if (old_active_eps == 0 &&
				virt_dev->tt_info->active_eps != 0) {
		rh_bw_info->num_active_tts += 1;
		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
	} else if (old_active_eps != 0 &&
				virt_dev->tt_info->active_eps == 0) {
		rh_bw_info->num_active_tts -= 1;
		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
	}
}
static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_bw_info ep_bw_info[31];
	int i;
	struct xhci_input_control_ctx *ctrl_ctx;
	int old_active_eps = 0;

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Make a copy of the BW info in case we need to revert this */
		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
				sizeof(ep_bw_info[i]));
		/* Drop the endpoint from the interval table if the endpoint is
		 * being dropped or changed.
		 */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	/* Overwrite the information stored in the endpoints' bw_info */
	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
	for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
		if (EP_IS_ADDED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}

	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* Ok, this fits in the bandwidth we have.
		 * Update the number of active TTs.
		 */
		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
		return 0;
	}

	/* We don't have enough bandwidth for this, revert the stored info. */
	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
		if (EP_IS_ADDED(ctrl_ctx, i)) {
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		}
		/* Revert the endpoint back to its old information */
		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
				sizeof(ep_bw_info[i]));
		/* Add any changed or dropped endpoints back into the table */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	return -ENOMEM;
}
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct completion *cmd_completion;
	u32 *cmd_status;
	struct xhci_virt_device *virt_dev;
	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];

	if (command)
		in_ctx = command->in_ctx;
	else
		in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
			xhci_reserve_host_resources(xhci, ctrl_ctx)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, "
				"active endpoint contexts = %u\n",
				xhci->num_active_eps);
		return -ENOMEM;
	}
	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough bandwidth\n");
		return -ENOMEM;
	}

	if (command) {
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci->cmd_ring->enqueue;

		/* Enqueue pointer can be left pointing to the link TRB,
		 * we must handle that
		 */
		if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
			command->command_trb =
				xhci->cmd_ring->enq_seg->next->trbs;

		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	cmd_trb = xhci->cmd_ring->dequeue;
	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* cancel the configure endpoint command */
		ret = xhci_cancel_cmd(xhci, command, cmd_trb);
		if (ret < 0)
			return ret;
		return -ETIME;
	}

	if (!ctx_change)
		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
	else
		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, ctrl_ctx);
		else
			xhci_finish_resource_reservation(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Don't issue the command if there's no endpoints to update. */
	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
			ctrl_ctx->drop_flags == 0)
		return 0;

	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret)
		/* Callee should call reset_bandwidth() */
		return ret;

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		u32 add_flags, u32 drop_flags)
{
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	in_ctx = xhci->devs[slot_id]->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ctrl_ctx,
			added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint()).
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers. xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}
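/* Illustrative example (editorial, not from the original source): a driver
 * asking for 16 streams becomes 17 once stream 0 is reserved, and
 * roundup_pow_of_two(17) = 32 stream context entries.  If HCC_MAX_PSA
 * reported only 16 supported entries, both values would be clamped to 16.
 */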
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}
/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
 * coordinate mass storage command queueing across multiple endpoints (basically
 * a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array. FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
			udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}
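/* Illustrative caller sketch (editorial; not part of this driver, and the
 * endpoint names are hypothetical): a storage-class driver reaches this
 * function through the HCD glue, roughly as:
 *
 *	struct usb_host_endpoint *eps[2] = { bulk_in_ep, bulk_out_ep };
 *	int streams = usb_alloc_streams(intf, eps, 2, 16, GFP_NOIO);
 *
 * A positive return is the number of usable stream IDs (1..streams), which
 * may be fewer than requested; usb_alloc_streams() dispatches to
 * hcd->driver->alloc_streams, which is xhci_alloc_streams() here.
 */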
/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -EINVAL;
	}

	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}
/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
	struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.\n",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev does not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
						SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;

	/* Enqueue pointer can be left pointing to the link TRB,
	 * we must handle that
	 */
	if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
		reset_device_cmd->command_trb =
			xhci->cmd_ring->enq_seg->next->trbs;

	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_dbg(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			last_freed_endpoint = i;
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
	}
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);

	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free the
	 * device.
	 */
	if (ret <= 0 && ret != -ENODEV)
		return;

	virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	if (udev->usb2_hw_lpm_enabled) {
		xhci_set_usb2_hardware_lpm(hcd, udev, 0);
		udev->usb2_hw_lpm_enabled = 0;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.  XXX Can free sleep?
	 */
}
/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.\n",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
			xhci->num_active_eps);
	return 0;
}
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;
	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	cmd_trb = xhci->cmd_ring->dequeue;
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* cancel the enable slot request */
		return xhci_cancel_cmd(xhci, NULL, cmd_trb);
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */

	return 1;

disable_slot:
	/* Disable slot, if we can do it without mem alloc */
	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
		xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;
	union xhci_trb *cmd_trb;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture test with an NEC controller,
		 * a zero-dereference was observed once due to virt_dev = 0.
		 * Print useful debug rather than crash if it is observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
			udev->slot_id);
		return -EINVAL;
	}

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -EINVAL;
	}
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	cmd_trb = xhci->cmd_ring->dequeue;
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
					udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			XHCI_CMD_DEFAULT_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for address device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* cancel the address device command */
		ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
		if (ret < 0)
			return ret;
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for address "
				"device command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
		 udev->slot_id,
		 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
		 (unsigned long long)
		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	/* Use kernel assigned address for devices; store xHC assigned
	 * address locally. */
	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
		+ 1;
	/* Zero the input context control for later use */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);

	return 0;
}
/*
 * Transfer the port index into the real index in the HW port status
 * registers.  Calculate the offset between the port's PORTSC register
 * and the port status base, then divide by the number of registers per
 * port to get the real index.  Raw port numbers are 1-based.
 */
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	__le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
	__le32 __iomem *addr;
	int raw_port;

	if (hcd->speed != HCD_USB3)
		addr = xhci->usb2_ports[port1 - 1];
	else
		addr = xhci->usb3_ports[port1 - 1];

	raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
	return raw_port;
}
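/*
 * Worked example (illustrative addresses, not from any particular host):
 * if a port's PORTSC register sits three port-register sets beyond
 * port_status_base, then (addr - base_addr) / NUM_PORT_REGS is 3 and the
 * raw port number reported is 3 + 1 = 4.
 */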
/*
 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
 * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
 */
static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
			struct usb_device *udev, u16 max_exit_latency)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}

	/* Attempt to issue an Evaluate Context command to change the MEL. */
	virt_dev = xhci->devs[udev->slot_id];
	command = xhci->lpm_command;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);

	xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
	xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, command->in_ctx, 0);

	/* Issue and wait for the evaluate context command. */
	ret = xhci_configure_endpoint(xhci, udev, command,
			true, true);
	xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);

	if (!ret) {
		spin_lock_irqsave(&xhci->lock, flags);
		virt_dev->current_mel = max_exit_latency;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}

#ifdef CONFIG_PM_RUNTIME
/* BESL to HIRD Encoding array for USB2 LPM */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
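/*
 * Illustrative reading of the table: the index is the 4-bit BESL value
 * and the entry is the corresponding L1 exit latency in microseconds,
 * e.g. BESL 0 -> 125us, BESL 5 -> 500us, BESL 15 -> 10000us (10ms).
 */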
/* Calculate HIRD/BESL for USB2 PORTPMSC */
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
					struct usb_device *udev)
{
	int u2del, besl, besl_host;
	int besl_device = 0;
	u32 field;

	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	if (field & USB_BESL_SUPPORT) {
		for (besl_host = 0; besl_host < 16; besl_host++) {
			if (xhci_besl_encoding[besl_host] >= u2del)
				break;
		}
		/* Use baseline BESL value as default */
		if (field & USB_BESL_BASELINE_VALID)
			besl_device = USB_GET_BESL_BASELINE(field);
		else if (field & USB_BESL_DEEP_VALID)
			besl_device = USB_GET_BESL_DEEP(field);
	} else {
		if (u2del <= 50)
			besl_host = 0;
		else
			besl_host = (u2del - 51) / 75 + 1;
	}

	besl = besl_host + besl_device;
	if (besl > 15)
		besl = 15;

	return besl;
}
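/*
 * Worked example (illustrative numbers): a host reporting a U2 device
 * exit latency of 300us for a device without BESL support takes the
 * else branch above: besl_host = (300 - 51) / 75 + 1 = 4, and
 * xhci_besl_encoding[4] = 400us is the smallest encoding that still
 * covers the 300us exit latency.
 */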
/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
{
	u32 field;
	int l1;
	int besld = 0;
	int hirdm = 0;

	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
	l1 = udev->l1_params.timeout / 256;

	/* device has preferred BESLD */
	if (field & USB_BESL_DEEP_VALID) {
		besld = USB_GET_BESL_DEEP(field);
		hirdm = 1;
	}

	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
}
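/*
 * Worked example (illustrative numbers): an L1 timeout of 5120us in
 * udev->l1_params.timeout becomes l1 = 5120 / 256 = 20, and a device
 * advertising a deep BESL of 4 yields
 * PORT_BESLD(4) | PORT_L1_TIMEOUT(20) | PORT_HIRDM(1) as the PORTHLPMC
 * value.
 */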
static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
			struct usb_device *udev)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	struct dev_info	*dev_info;
	__le32 __iomem	**port_array;
	__le32 __iomem	*addr, *pm_addr;
	u32		temp, dev_id;
	unsigned int	port_num;
	unsigned long	flags;
	int		hird;
	int		ret;

	if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
			!udev->lpm_capable)
		return -EINVAL;

	/* So far we only support LPM for non-hub devices connected directly
	 * to the root hub.
	 */
	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EINVAL;

	spin_lock_irqsave(&xhci->lock, flags);

	/* Look for devices in lpm_failed_devs list */
	dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
			le16_to_cpu(udev->descriptor.idProduct);
	list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
		if (dev_info->dev_id == dev_id) {
			ret = -EINVAL;
			goto finish;
		}
	}

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;

	if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
		ret = -EINVAL;
		goto finish;
	}

	/*
	 * Test USB 2.0 software LPM.
	 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
	 * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
	 * in the June 2011 errata release.
	 */
	xhci_dbg(xhci, "test port %d software LPM\n", port_num);
	/*
	 * Set L1 Device Slot and HIRD/BESL.
	 * Check the device's USB 2.0 extension descriptor to determine whether
	 * HIRD or BESL should be used. See USB2.0 LPM errata.
	 */
	pm_addr = port_array[port_num] + PORTPMSC;
	hird = xhci_calculate_hird_besl(xhci, udev);
	temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
	xhci_writel(xhci, temp, pm_addr);

	/* Set port link state to U2(L1) */
	addr = port_array[port_num];
	xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);

	/* wait for ACK */
	spin_unlock_irqrestore(&xhci->lock, flags);
	msleep(10);
	spin_lock_irqsave(&xhci->lock, flags);

	/* Check L1 Status */
	ret = xhci_handshake(xhci, pm_addr,
			PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
	if (ret != -ETIMEDOUT) {
		/* enter L1 successfully */
		temp = xhci_readl(xhci, addr);
		xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
				port_num, temp);
		ret = 0;
	} else {
		temp = xhci_readl(xhci, pm_addr);
		xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
				port_num, temp & PORT_L1S_MASK);
		ret = -EINVAL;
	}

	/* Resume the port */
	xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);

	spin_unlock_irqrestore(&xhci->lock, flags);
	msleep(10);
	spin_lock_irqsave(&xhci->lock, flags);

	/* Clear PLC */
	xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);

	/* Check PORTSC to make sure the device is in the right state */
	if (!ret) {
		temp = xhci_readl(xhci, addr);
		xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
		if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
				(temp & PORT_PLS_MASK) != XDEV_U0) {
			xhci_dbg(xhci, "port L1 resume fail\n");
			ret = -EINVAL;
		}
	}

	if (ret) {
		/* Insert dev to lpm_failed_devs list */
		xhci_warn(xhci, "device LPM test failed, may disconnect and "
				"re-enumerate\n");
		dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
		if (!dev_info) {
			ret = -ENOMEM;
			goto finish;
		}
		dev_info->dev_id = dev_id;
		INIT_LIST_HEAD(&dev_info->list);
		list_add(&dev_info->list, &xhci->lpm_failed_devs);
	} else {
		xhci_ring_device(xhci, udev->slot_id);
	}

finish:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
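/*
 * In short, the test above: programs the slot ID and HIRD/BESL into
 * PORTPMSC, forces the link to U2(L1), polls the L1S field for
 * PORT_L1S_SUCCESS, resumes the port to U0, and then checks PORTSC to
 * confirm the device survived the round trip.  Devices that fail are
 * remembered in lpm_failed_devs so the test is not repeated.
 */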
int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	__le32 __iomem	**port_array;
	__le32 __iomem	*pm_addr, *hlpm_addr;
	u32		pm_val, hlpm_val, field;
	unsigned int	port_num;
	unsigned long	flags;
	int		hird, exit_latency;
	int		ret;

	if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
			!udev->lpm_capable)
		return -EPERM;

	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EPERM;

	if (udev->usb2_hw_lpm_capable != 1)
		return -EPERM;

	spin_lock_irqsave(&xhci->lock, flags);

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;
	pm_addr = port_array[port_num] + PORTPMSC;
	pm_val = xhci_readl(xhci, pm_addr);
	hlpm_addr = port_array[port_num] + PORTHLPMC;
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num);

	if (enable) {
		/* Host supports BESL timeout instead of HIRD */
		if (udev->usb2_hw_lpm_besl_capable) {
			/* if device doesn't have a preferred BESL value use a
			 * default one which works with mixed HIRD and BESL
			 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
			 */
			if ((field & USB_BESL_SUPPORT) &&
			    (field & USB_BESL_BASELINE_VALID))
				hird = USB_GET_BESL_BASELINE(field);
			else
				hird = udev->l1_params.besl;

			exit_latency = xhci_besl_encoding[hird];
			spin_unlock_irqrestore(&xhci->lock, flags);

			/* USB 3.0 code dedicates one xhci->lpm_command->in_ctx
			 * input context for link power management evaluate
			 * context commands. It is protected by hcd->bandwidth
			 * mutex and is shared by all devices. We need to set
			 * the max ext latency in USB 2 BESL LPM as well, so
			 * use the same mutex and xhci_change_max_exit_latency()
			 */
			mutex_lock(hcd->bandwidth_mutex);
			ret = xhci_change_max_exit_latency(xhci, udev,
					exit_latency);
			mutex_unlock(hcd->bandwidth_mutex);

			if (ret < 0)
				return ret;
			spin_lock_irqsave(&xhci->lock, flags);

			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
			xhci_writel(xhci, hlpm_val, hlpm_addr);
			/* flush write */
			xhci_readl(xhci, hlpm_addr);
		} else {
			hird = xhci_calculate_hird_besl(xhci, udev);
		}

		pm_val &= ~PORT_HIRD_MASK;
		pm_val |= PORT_HIRD(hird) | PORT_RWE;
		xhci_writel(xhci, pm_val, pm_addr);
		pm_val = xhci_readl(xhci, pm_addr);
		pm_val |= PORT_HLE;
		xhci_writel(xhci, pm_val, pm_addr);
		/* flush write */
		xhci_readl(xhci, pm_addr);
	} else {
		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
		xhci_writel(xhci, pm_val, pm_addr);
		/* flush write */
		xhci_readl(xhci, pm_addr);
		if (udev->usb2_hw_lpm_besl_capable) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			mutex_lock(hcd->bandwidth_mutex);
			xhci_change_max_exit_latency(xhci, udev, 0);
			mutex_unlock(hcd->bandwidth_mutex);
			return 0;
		}
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
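/*
 * Note the enable ordering above: the BESLD/L1-timeout fields in
 * PORTHLPMC are programmed first, then HIRD and remote wake (PORT_RWE)
 * in PORTPMSC, and hardware LPM (PORT_HLE) is switched on only in the
 * final write, after a read back of PORTPMSC.
 */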
/* Check whether a USB2 port supports a given extended capability protocol.
 * Only USB2 ports' extended protocol capability values are cached.
 * Return 1 if the capability is supported.
 */
static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
					   unsigned capability)
{
	u32 port_offset, port_count;
	int i;

	for (i = 0; i < xhci->num_ext_caps; i++) {
		if (xhci->ext_caps[i] & capability) {
			/* port offsets start at 1 */
			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
			if (port >= port_offset &&
			    port < port_offset + port_count)
				return 1;
		}
	}
	return 0;
}
int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	int		ret;
	int		portnum = udev->portnum - 1;

	ret = xhci_usb2_software_lpm_test(hcd, udev);
	if (!ret) {
		xhci_dbg(xhci, "software LPM test succeeded\n");
		if (xhci->hw_lpm_support == 1 &&
				xhci_check_usb2_port_capability(xhci, portnum,
					XHCI_HLC)) {
			udev->usb2_hw_lpm_capable = 1;
			udev->l1_params.timeout = XHCI_L1_TIMEOUT;
			udev->l1_params.besl = XHCI_DEFAULT_BESL;
			if (xhci_check_usb2_port_capability(xhci, portnum,
					XHCI_BLC))
				udev->usb2_hw_lpm_besl_capable = 1;
			ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
			if (!ret)
				udev->usb2_hw_lpm_enabled = 1;
		}
	}

	return 0;
}

#else

int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	return 0;
}

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

#endif /* CONFIG_PM_RUNTIME */
/*---------------------- USB 3.0 Link PM functions ------------------------*/

#ifdef CONFIG_PM
/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
static unsigned long long xhci_service_interval_to_ns(
		struct usb_endpoint_descriptor *desc)
{
	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
}
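/*
 * Worked example: a periodic endpoint with bInterval = 4 has a service
 * interval of 2^(4 - 1) * 125us = 1ms, which this helper returns as
 * 1000000ns.
 */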
static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
		enum usb3_link_state state)
{
	unsigned long long sel;
	unsigned long long pel;
	unsigned int max_sel_pel;
	char *state_name;

	switch (state) {
	case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
		state_name = "U1";
		break;
	case USB3_LPM_U2:
		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
		state_name = "U2";
		break;
	default:
		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
				__func__);
		return USB3_LPM_DISABLED;
	}

	if (sel <= max_sel_pel && pel <= max_sel_pel)
		return USB3_LPM_DEVICE_INITIATED;

	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long SEL %llu us\n",
				state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long PEL %llu us\n",
				state_name, pel);
	return USB3_LPM_DISABLED;
}
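/*
 * Illustrative example: a device reporting u1_params.sel = 400ns and
 * u1_params.pel = 800ns rounds up to sel = 1us and pel = 1us.  If both
 * fit within USB3_LPM_MAX_U1_SEL_PEL, the hub timeout is given up but
 * device-initiated U1 stays allowed (USB3_LPM_DEVICE_INITIATED).
 */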
/* Returns the hub-encoded U1 timeout value.
 * The U1 timeout should be the maximum of the following values:
 *  - For control endpoints, U1 system exit latency (SEL) * 3
 *  - For bulk endpoints, U1 SEL * 5
 *  - For interrupt endpoints:
 *    - Notification EPs, U1 SEL * 3
 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
 */
static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	int ep_type;
	int intr_type;

	ep_type = usb_endpoint_type(desc);
	switch (ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		timeout_ns = udev->u1_params.sel * 3;
		break;
	case USB_ENDPOINT_XFER_BULK:
		timeout_ns = udev->u1_params.sel * 5;
		break;
	case USB_ENDPOINT_XFER_INT:
		intr_type = usb_endpoint_interrupt_type(desc);
		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
			timeout_ns = udev->u1_params.sel * 3;
			break;
		}
		/* Otherwise the calculation is the same as isoc eps */
	case USB_ENDPOINT_XFER_ISOC:
		timeout_ns = xhci_service_interval_to_ns(desc);
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
		if (timeout_ns < udev->u1_params.sel * 2)
			timeout_ns = udev->u1_params.sel * 2;
		break;
	default:
		return 0;
	}

	/* The U1 timeout is encoded in 1us intervals. */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
	/* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */
	if (timeout_ns == USB3_LPM_DISABLED)
		timeout_ns = 1;

	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U1.
	 */
	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
			"due to long timeout %llu us\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}
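/*
 * Worked example (illustrative numbers): a bulk endpoint on a device
 * with u1_params.sel = 400ns gets timeout_ns = 400 * 5 = 2000ns, which
 * encodes as DIV_ROUND_UP(2000, 1000) = 2, i.e. a 2us hub-initiated U1
 * timeout.
 */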
/* Returns the hub-encoded U2 timeout value.
 * The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
 *  - largest bInterval of any active periodic endpoint (to avoid going
 *    into lower power link states between intervals).
 *  - the U2 Exit Latency of the device
 */
static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	unsigned long long u2_del_ns;

	timeout_ns = 10 * 1000 * 1000;

	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
			(xhci_service_interval_to_ns(desc) > timeout_ns))
		timeout_ns = xhci_service_interval_to_ns(desc);

	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
	if (u2_del_ns > timeout_ns)
		timeout_ns = u2_del_ns;

	/* The U2 timeout is encoded in 256us intervals */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U2.
	 */
	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
			"due to long timeout %llu us\n", timeout_ns * 256);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
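/*
 * Worked example: with no long-interval periodic endpoints and a small
 * bU2DevExitLat, the 10ms floor wins: timeout_ns = 10000000, and
 * DIV_ROUND_UP(10000000, 256 * 1000) = 40, i.e. the hub is asked for a
 * U2 timeout of 40 * 256us = 10.24ms.
 */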
static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	if (state == USB3_LPM_U1) {
		if (xhci->quirks & XHCI_INTEL_HOST)
			return xhci_calculate_intel_u1_timeout(udev, desc);
	} else {
		if (xhci->quirks & XHCI_INTEL_HOST)
			return xhci_calculate_intel_u2_timeout(udev, desc);
	}

	return USB3_LPM_DISABLED;
}
static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	u16 alt_timeout;

	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
		desc, state, timeout);

	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, just stop searching.
	 */
	if (alt_timeout == USB3_LPM_DISABLED ||
			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
		*timeout = alt_timeout;
		return -E2BIG;
	}
	if (alt_timeout > *timeout)
		*timeout = alt_timeout;
	return 0;
}
static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,
		enum usb3_link_state state,
		u16 *timeout)
{
	int j;

	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
					&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
	}
	return 0;
}
static int xhci_check_intel_tier_policy(struct usb_device *udev,
		enum usb3_link_state state)
{
	struct usb_device *parent;
	unsigned int num_hubs;

	if (state == USB3_LPM_U2)
		return 0;

	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
	for (parent = udev->parent, num_hubs = 0; parent->parent;
			parent = parent->parent)
		num_hubs++;

	if (num_hubs < 2)
		return 0;

	dev_dbg(&udev->dev, "Disabling U1 link state for device"
			" below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub "
			"to decrease power consumption.\n");
	return -E2BIG;
}
static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (xhci->quirks & XHCI_INTEL_HOST)
		return xhci_check_intel_tier_policy(udev, state);
	return -EINVAL;
}
/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < USB_MAXINTERFACES; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
						"at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}
static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;
	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
				"is too big.\n", mel_us);
		return -E2BIG;
	}
	return mel_us;
}
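/*
 * Worked example (illustrative numbers): with U1 staying enabled at
 * u1_params.mel = 3000ns and U2 being enabled with u2_params.mel =
 * 2000ns, the rounded latencies are 3us and 2us, and mel_us = 3 is what
 * gets programmed as the slot's Max Exit Latency.
 */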
/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd	*xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}
int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd	*xhci;
	u16 mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return 0;
}
#else /* CONFIG_PM */

int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}

#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * high-speed hub.
		 */
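		/*
		 * Illustrative conversion: one FS bit time is roughly 83ns,
		 * so 8 FS bit times come to about 666ns.  A TT think time
		 * of 1332ns therefore encodes as (1332 / 666) - 1 = 1,
		 * meaning 16 FS bit times.
		 */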
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why? */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
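/*
 * MFINDEX counts 125us microframes, so shifting right by three divides
 * by 8 and yields the current 1ms frame number, which is what the USB
 * core expects from get_frame_number().
 */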
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd		*xhci;
	struct device		*dev = hcd->self.controller;
	int			retval;
	u32			temp;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;
	/* XHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
		if (!xhci)
			return -ENOMEM;
		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		xhci = hcd_to_xhci(hcd);
		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
		if (HCC_64BIT_ADDR(temp)) {
			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
		} else {
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
		}
		return 0;
	}

	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	get_quirks(dev, xhci);

	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk will ignore such
	 * spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		goto error;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Reset complete\n");

	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	if (HCC_64BIT_ADDR(temp)) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
	} else {
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Called HCD init\n");
	return 0;
error:
	kfree(xhci);
	return retval;
}
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		pr_debug("Problem registering PCI driver.\n");
		return retval;
	}
	retval = xhci_register_plat();
	if (retval < 0) {
		pr_debug("Problem registering platform driver.\n");
		goto unreg_pci;
	}
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	return 0;
unreg_pci:
	xhci_unregister_pci();
	return retval;
}
module_init(xhci_hcd_init);
static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
	xhci_unregister_plat();
}
module_exit(xhci_hcd_cleanup);