/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
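/*
 * Usage sketch (illustrative, not part of the original file): since the
 * parameter is declared with S_IRUGO | S_IWUSR, it can be set at load time,
 * e.g. "modprobe xhci-hcd link_quirk=1", or toggled afterwards through
 * /sys/module/xhci_hcd/parameters/link_quirk.
 */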
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
                u32 mask, u32 done, int usec)
{
        u32 result;

        do {
                result = xhci_readl(xhci, ptr);
                if (result == ~(u32)0)          /* card removed */
                        return -ENODEV;
                result &= mask;
                if (result == done)
                        return 0;
                udelay(1);
                usec--;
        } while (usec > 0);
        return -ETIMEDOUT;
}
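/*
 * Usage sketch: callers spin on an operational register until the masked
 * bits reach the wanted value.  For example, xhci_reset() below waits for
 * the "Controller Not Ready" flag to clear before doorbells may be rung:
 *
 *      ret = handshake(xhci, &xhci->op_regs->status,
 *                      STS_CNR, 0, 10 * 1000 * 1000);
 *
 * A -ENODEV return means the register read back as all-ones (the host was
 * removed); -ETIMEDOUT means the bits never matched within the timeout.
 */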
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
        u32 halted;
        u32 cmd;
        u32 mask;

        mask = ~(XHCI_IRQS);
        halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
        if (!halted)
                mask &= ~CMD_RUN;

        cmd = xhci_readl(xhci, &xhci->op_regs->command);
        cmd &= mask;
        xhci_writel(xhci, cmd, &xhci->op_regs->command);
}
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
        int ret;

        xhci_dbg(xhci, "// Halt the HC\n");
        xhci_quiesce(xhci);

        ret = handshake(xhci, &xhci->op_regs->status,
                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
        if (!ret) {
                xhci->xhc_state |= XHCI_STATE_HALTED;
                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
        } else
                xhci_warn(xhci, "Host not halted after %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        return ret;
}
/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
        u32 temp;
        int ret;

        temp = xhci_readl(xhci, &xhci->op_regs->command);
        temp |= (CMD_RUN);
        xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
                        temp);
        xhci_writel(xhci, temp, &xhci->op_regs->command);

        /*
         * Wait for the HCHalted Status bit to be 0 to indicate the host is
         * running.
         */
        ret = handshake(xhci, &xhci->op_regs->status,
                        STS_HALT, 0, XHCI_MAX_HALT_USEC);
        if (ret == -ETIMEDOUT)
                xhci_err(xhci, "Host took too long to start, "
                                "waited %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        if (!ret)
                xhci->xhc_state &= ~XHCI_STATE_HALTED;
        return ret;
}
/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
        u32 command;
        u32 state;
        int ret, i;

        state = xhci_readl(xhci, &xhci->op_regs->status);
        if ((state & STS_HALT) == 0) {
                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
                return 0;
        }

        xhci_dbg(xhci, "// Reset the HC\n");
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_RESET;
        xhci_writel(xhci, command, &xhci->op_regs->command);

        ret = handshake(xhci, &xhci->op_regs->command,
                        CMD_RESET, 0, 10 * 1000 * 1000);
        if (ret)
                return ret;

        xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
        /*
         * xHCI cannot write to any doorbells or operational registers other
         * than status until the "Controller Not Ready" flag is cleared.
         */
        ret = handshake(xhci, &xhci->op_regs->status,
                        STS_CNR, 0, 10 * 1000 * 1000);

        for (i = 0; i < 2; ++i) {
                xhci->bus_state[i].port_c_suspend = 0;
                xhci->bus_state[i].suspended_ports = 0;
                xhci->bus_state[i].resuming_ports = 0;
        }

        return ret;
}
#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
        int i;

        if (!xhci->msix_entries)
                return -EINVAL;

        for (i = 0; i < xhci->msix_count; i++)
                if (xhci->msix_entries[i].vector)
                        free_irq(xhci->msix_entries[i].vector,
                                        xhci_to_hcd(xhci));
        return 0;
}
/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
        int ret;
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        ret = pci_enable_msi(pdev);
        if (ret) {
                xhci_dbg(xhci, "failed to allocate MSI entry\n");
                return ret;
        }

        ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
                        0, "xhci_hcd", xhci_to_hcd(xhci));
        if (ret) {
                xhci_dbg(xhci, "disable MSI interrupt\n");
                pci_disable_msi(pdev);
        }

        return ret;
}
/*
 * Free IRQs
 * free all IRQs request
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int ret;

        /* return if using legacy interrupt */
        if (xhci_to_hcd(xhci)->irq > 0)
                return;

        ret = xhci_free_msi(xhci);
        if (!ret)
                return;
        if (pdev->irq > 0)
                free_irq(pdev->irq, xhci_to_hcd(xhci));

        return;
}
/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
        int i, ret = 0;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        /*
         * calculate number of msi-x vectors supported.
         * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
         *   with max number of interrupters based on the xhci HCSPARAMS1.
         * - num_online_cpus: maximum msi-x vectors per CPUs core.
         *   Add additional 1 vector to ensure always available interrupt.
         */
        xhci->msix_count = min(num_online_cpus() + 1,
                                HCS_MAX_INTRS(xhci->hcs_params1));

        xhci->msix_entries =
                kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
                        GFP_KERNEL);
        if (!xhci->msix_entries) {
                xhci_err(xhci, "Failed to allocate MSI-X entries\n");
                return -ENOMEM;
        }

        for (i = 0; i < xhci->msix_count; i++) {
                xhci->msix_entries[i].entry = i;
                xhci->msix_entries[i].vector = 0;
        }

        ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
        if (ret) {
                xhci_dbg(xhci, "Failed to enable MSI-X\n");
                goto free_entries;
        }

        for (i = 0; i < xhci->msix_count; i++) {
                ret = request_irq(xhci->msix_entries[i].vector,
                                (irq_handler_t)xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));
                if (ret)
                        goto disable_msix;
        }

        hcd->msix_enabled = 1;
        return ret;

disable_msix:
        xhci_dbg(xhci, "disable MSI-X interrupt\n");
        xhci_free_irq(xhci);
        pci_disable_msix(pdev);
free_entries:
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
        return ret;
}
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        xhci_free_irq(xhci);

        if (xhci->msix_entries) {
                pci_disable_msix(pdev);
                kfree(xhci->msix_entries);
                xhci->msix_entries = NULL;
        } else {
                pci_disable_msi(pdev);
        }

        hcd->msix_enabled = 0;
        return;
}
static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
        int i;

        if (xhci->msix_entries) {
                for (i = 0; i < xhci->msix_count; i++)
                        synchronize_irq(xhci->msix_entries[i].vector);
        }
}
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int ret;

        /*
         * Some Fresco Logic host controllers advertise MSI, but fail to
         * generate interrupts.  Don't even try to enable MSI.
         */
        if (xhci->quirks & XHCI_BROKEN_MSI)
                return 0;

        /* unregister the legacy interrupt */
        if (hcd->irq)
                free_irq(hcd->irq, hcd);
        hcd->irq = 0;

        ret = xhci_setup_msix(xhci);
        if (ret)
                /* fall back to msi */
                ret = xhci_setup_msi(xhci);

        if (!ret)
                /* hcd->irq is 0, we have MSI */
                return 0;

        if (!pdev->irq) {
                xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
                return -EINVAL;
        }

        /* fall back to legacy interrupt */
        ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
                        hcd->irq_descr, hcd);
        if (ret) {
                xhci_err(xhci, "request interrupt %d failed\n",
                                pdev->irq);
                return ret;
        }
        hcd->irq = pdev->irq;
        return 0;
}
#else

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        return 0;
}

static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        int retval = 0;

        xhci_dbg(xhci, "xhci_init\n");
        spin_lock_init(&xhci->lock);
        if (xhci->hci_version == 0x95 && link_quirk) {
                xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
                xhci->quirks |= XHCI_LINK_TRB_QUIRK;
        } else {
                xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
        }
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg(xhci, "Finished xhci_init\n");

        return retval;
}
/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
        unsigned long flags;
        int temp;
        u64 temp_64;
        struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
        int i, j;

        xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

        spin_lock_irqsave(&xhci->lock, flags);
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
                        (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg(xhci, "HW died, polling stopped.\n");
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
        xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
        xhci->error_bitmask = 0;
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
        xhci_dbg(xhci, "Command ring:\n");
        xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);
        for (i = 0; i < MAX_HC_SLOTS; ++i) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; ++j) {
                        xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
                }
        }
        spin_unlock_irqrestore(&xhci->lock, flags);

        if (!xhci->zombie)
                mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
        else
                xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif
static int xhci_run_finished(struct xhci_hcd *xhci)
{
        if (xhci_start(xhci)) {
                xhci_halt(xhci);
                return -ENODEV;
        }
        xhci->shared_hcd->state = HC_STATE_RUNNING;
        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_ring_cmd_db(xhci);

        xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
        return 0;
}
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
        u32 temp;
        u64 temp_64;
        int ret;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        /* Start the xHCI host controller running only after the USB 2.0 roothub
         * is setup.
         */

        hcd->uses_new_polling = 1;
        if (!usb_hcd_is_primary_hcd(hcd))
                return xhci_run_finished(xhci);

        xhci_dbg(xhci, "xhci_run\n");

        ret = xhci_try_enable_msi(hcd);
        if (ret)
                return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        init_timer(&xhci->event_ring_timer);
        xhci->event_ring_timer.data = (unsigned long) xhci;
        xhci->event_ring_timer.function = xhci_event_ring_work;
        /* Poll the event ring */
        xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
        xhci->zombie = 0;
        xhci_dbg(xhci, "Setting event ring polling timer\n");
        add_timer(&xhci->event_ring_timer);
#endif

        xhci_dbg(xhci, "Command ring memory map follows:\n");
        xhci_debug_ring(xhci, xhci->cmd_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);

        xhci_dbg(xhci, "ERST memory map follows:\n");
        xhci_dbg_erst(xhci, &xhci->erst);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

        xhci_dbg(xhci, "// Set the interrupt modulation register\n");
        temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
        temp &= ~ER_IRQ_INTERVAL_MASK;
        temp |= (u32) 160;
        xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

        /* Set the HCD state before we enable the irqs */
        temp = xhci_readl(xhci, &xhci->op_regs->command);
        temp |= (CMD_EIE);
        xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
                        temp);
        xhci_writel(xhci, temp, &xhci->op_regs->command);

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
        xhci_writel(xhci, ER_IRQ_ENABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_queue_vendor_command(xhci, 0, 0, 0,
                                TRB_TYPE(TRB_NEC_GET_FW));

        xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
        return 0;
}
static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);

        /* The shared_hcd is going to be deallocated shortly (the USB core only
         * calls this function when allocation fails in usb_add_hcd(), or
         * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
         */
        xhci->shared_hcd = NULL;
        spin_unlock_irq(&xhci->lock);
}
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (!usb_hcd_is_primary_hcd(hcd)) {
                xhci_only_stop_hcd(xhci->shared_hcd);
                return;
        }

        spin_lock_irq(&xhci->lock);
        /* Make sure the xHC is halted for a USB3 roothub
         * (xhci_stop() could be called as part of failed init).
         */
        xhci_halt(xhci);
        xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        /* Tell the event ring poll function not to reschedule */
        xhci->zombie = 1;
        del_timer_sync(&xhci->event_ring_timer);
#endif

        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_dev_put();

        xhci_dbg(xhci, "// Disabling event ring interrupts\n");
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        xhci_dbg(xhci, "cleaning up memory\n");
        xhci_mem_cleanup(xhci);
        xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                        xhci_readl(xhci, &xhci->op_regs->status));
}
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
                usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

        xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
                        xhci_readl(xhci, &xhci->op_regs->status));
}
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
        xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
        xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
        xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
        xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
        xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
        xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
        xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
        xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
        xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
        xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
        xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
        xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
        xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
        xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
        u64 val_64;

        /* step 2: initialize command ring buffer */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                      xhci->cmd_ring->dequeue) &
                 (u64) ~CMD_RING_RSVD_BITS) |
                xhci->cmd_ring->cycle_state;
        xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
                        (long unsigned long) val_64);
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
        struct xhci_ring *ring;
        struct xhci_segment *seg;

        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
        do {
                memset(seg->trbs, 0,
                        sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
                seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
                        cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);

        /* Reset the software enqueue and dequeue pointers */
        ring->deq_seg = ring->first_seg;
        ring->dequeue = ring->first_seg->trbs;
        ring->enq_seg = ring->deq_seg;
        ring->enqueue = ring->dequeue;

        ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
        /*
         * Ring is now zeroed, so the HW should look for change of ownership
         * when the cycle bit is set to 1.
         */
        ring->cycle_state = 1;

        /*
         * Reset the hardware dequeue pointer.
         * Yes, this will need to be re-written after resume, but we're paranoid
         * and want to make sure the hardware doesn't access bogus memory
         * because, say, the BIOS or an SMI started the host without changing
         * the command ring pointers.
         */
        xhci_set_cmd_ring_deq(xhci);
}
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
        int rc = 0;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        u32 command;

        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
        /* step 1: stop endpoint */
        /* skipped assuming that port suspend has done */

        /* step 2: clear Run/Stop bit */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command &= ~CMD_RUN;
        xhci_writel(xhci, command, &xhci->op_regs->command);
        if (handshake(xhci, &xhci->op_regs->status,
                      STS_HALT, STS_HALT, 100*100)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        xhci_clear_command_ring(xhci);

        /* step 3: save registers */
        xhci_save_registers(xhci);

        /* step 4: set CSS flag */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_CSS;
        xhci_writel(xhci, command, &xhci->op_regs->command);
        if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
                xhci_warn(xhci, "WARN: xHC save state timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        spin_unlock_irq(&xhci->lock);

        /* step 5: remove core well power */
        /* synchronize irq when using MSI-X */
        xhci_msix_sync_irqs(xhci);

        return rc;
}
/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
        u32 command, temp = 0;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct usb_hcd *secondary_hcd;
        int retval = 0;

        /* Wait a bit if either of the roothubs need to settle from the
         * transition into bus suspend.
         */
        if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
                        time_before(jiffies,
                                xhci->bus_state[1].next_statechange))
                msleep(100);

        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

        spin_lock_irq(&xhci->lock);
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                hibernated = true;

        if (!hibernated) {
                /* step 1: restore register */
                xhci_restore_registers(xhci);
                /* step 2: initialize command ring buffer */
                xhci_set_cmd_ring_deq(xhci);
                /* step 3: restore state and start state*/
                /* step 3: set CRS flag */
                command = xhci_readl(xhci, &xhci->op_regs->command);
                command |= CMD_CRS;
                xhci_writel(xhci, command, &xhci->op_regs->command);
                if (handshake(xhci, &xhci->op_regs->status,
                              STS_RESTORE, 0, 10 * 1000)) {
                        xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
                temp = xhci_readl(xhci, &xhci->op_regs->status);
        }

        /* If restore operation fails, re-initialize the HC during resume */
        if ((temp & STS_SRE) || hibernated) {
                /* Let the USB core know _both_ roothubs lost power. */
                usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
                usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

                xhci_dbg(xhci, "Stop HCD\n");
                xhci_halt(xhci);
                xhci_reset(xhci);
                spin_unlock_irq(&xhci->lock);
                xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
                /* Tell the event ring poll function not to reschedule */
                xhci->zombie = 1;
                del_timer_sync(&xhci->event_ring_timer);
#endif

                xhci_dbg(xhci, "// Disabling event ring interrupts\n");
                temp = xhci_readl(xhci, &xhci->op_regs->status);
                xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
                temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
                xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                                &xhci->ir_set->irq_pending);
                xhci_print_ir_set(xhci, 0);

                xhci_dbg(xhci, "cleaning up memory\n");
                xhci_mem_cleanup(xhci);
                xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                                xhci_readl(xhci, &xhci->op_regs->status));

                /* USB core calls the PCI reinit and start functions twice:
                 * first with the primary HCD, and then with the secondary HCD.
                 * If we don't do the same, the host will never be started.
                 */
                if (!usb_hcd_is_primary_hcd(hcd))
                        secondary_hcd = hcd;
                else
                        secondary_hcd = xhci->shared_hcd;

                xhci_dbg(xhci, "Initialize the xhci_hcd\n");
                retval = xhci_init(hcd->primary_hcd);
                if (retval)
                        return retval;
                xhci_dbg(xhci, "Start the primary HCD\n");
                retval = xhci_run(hcd->primary_hcd);
                if (!retval) {
                        xhci_dbg(xhci, "Start the secondary HCD\n");
                        retval = xhci_run(secondary_hcd);
                }
                hcd->state = HC_STATE_SUSPENDED;
                xhci->shared_hcd->state = HC_STATE_SUSPENDED;
                goto done;
        }

        /* step 4: set Run/Stop bit */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_RUN;
        xhci_writel(xhci, command, &xhci->op_regs->command);
        handshake(xhci, &xhci->op_regs->status, STS_HALT,
                  0, 250 * 1000);

        /* step 5: walk topology and initialize portsc,
         * portpmsc and portli
         */
        /* this is done in bus_resume */

        /* step 6: restart each of the previously
         * Running endpoints by ringing their doorbells
         */

        spin_unlock_irq(&xhci->lock);

done:
        if (retval == 0) {
                usb_hcd_resume_root_hub(hcd);
                usb_hcd_resume_root_hub(xhci->shared_hcd);
        }
        return retval;
}

#endif  /* CONFIG_PM */
/*-------------------------------------------------------------------------*/

/*
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
        unsigned int index;

        if (usb_endpoint_xfer_control(desc))
                index = (unsigned int) (usb_endpoint_num(desc)*2);
        else
                index = (unsigned int) (usb_endpoint_num(desc)*2) +
                        (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
        return index;
}
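/*
 * Worked example of the index formula above (illustrative): endpoint
 * address 0x81 (epnum 1, IN) gives (1 * 2) + 1 - 1 = 2; endpoint 0x02
 * (epnum 2, OUT) gives (2 * 2) + 0 - 1 = 3; the default control endpoint
 * uses the IN index, (0 * 2) = 0.
 */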
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
        return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
        return 1 << (ep_index + 1);
}
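/*
 * Continuing the example above (illustrative): endpoint 0x81 has index 2,
 * so its context flag is 1 << (2 + 1) = 0x8.  Bit 0 of the same bitmask is
 * the slot context (SLOT_FLAG) and bit 1 is endpoint 0 (EP0_FLAG).
 */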
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with endpoint address 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
        return fls(added_ctxs) - 1;
}
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
                const char *func) {
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;

        if (!hcd || (check_ep && !ep) || !udev) {
                printk(KERN_DEBUG "xHCI %s called with invalid args\n",
                                func);
                return -EINVAL;
        }
        if (!udev->parent) {
                printk(KERN_DEBUG "xHCI %s called for root hub\n",
                                func);
                return 0;
        }

        xhci = hcd_to_xhci(hcd);
        if (xhci->xhc_state & XHCI_STATE_HALTED)
                return -ENODEV;

        if (check_virt_dev) {
                if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
                        printk(KERN_DEBUG "xHCI %s called with unaddressed "
                                        "device\n", func);
                        return -EINVAL;
                }

                virt_dev = xhci->devs[udev->slot_id];
                if (virt_dev->udev != udev) {
                        printk(KERN_DEBUG "xHCI %s called with udev and "
                                        "virt_dev does not match\n", func);
                        return -EINVAL;
                }
        }

        return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                struct usb_device *udev, struct xhci_command *command,
                bool ctx_change, bool must_succeed);
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, struct urb *urb)
{
        struct xhci_container_ctx *in_ctx;
        struct xhci_container_ctx *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        int max_packet_size;
        int hw_max_packet_size;
        int ret = 0;

        out_ctx = xhci->devs[slot_id]->out_ctx;
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
        max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
        if (hw_max_packet_size != max_packet_size) {
                xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
                xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
                                max_packet_size);
                xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
                                hw_max_packet_size);
                xhci_dbg(xhci, "Issuing evaluate context command.\n");

                /* Set up the modified control endpoint 0 */
                xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                                xhci->devs[slot_id]->out_ctx, ep_index);
                in_ctx = xhci->devs[slot_id]->in_ctx;
                ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
                ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

                /* Set up the input context flags for the command */
                /* FIXME: This won't work if a non-default control endpoint
                 * changes max packet sizes.
                 */
                ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
                ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
                ctrl_ctx->drop_flags = 0;

                xhci_dbg(xhci, "Slot %d input context\n", slot_id);
                xhci_dbg_ctx(xhci, in_ctx, ep_index);
                xhci_dbg(xhci, "Slot %d output context\n", slot_id);
                xhci_dbg_ctx(xhci, out_ctx, ep_index);

                ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
                                true, false);

                /* Clean up the input context for later use by bandwidth
                 * functions.
                 */
                ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
        }
        return ret;
}
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_td *buffer;
        unsigned long flags;
        int ret = 0;
        unsigned int slot_id, ep_index;
        struct urb_priv *urb_priv;
        int size, i;

        if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
                                        true, true, __func__) <= 0)
                return -EINVAL;

        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);

        if (!HCD_HW_ACCESSIBLE(hcd)) {
                if (!in_interrupt())
                        xhci_dbg(xhci, "urb submitted during PCI suspend\n");
                ret = -ESHUTDOWN;
                goto exit;
        }

        if (usb_endpoint_xfer_isoc(&urb->ep->desc))
                size = urb->number_of_packets;
        else
                size = 1;

        urb_priv = kzalloc(sizeof(struct urb_priv) +
                        size * sizeof(struct xhci_td *), mem_flags);
        if (!urb_priv)
                return -ENOMEM;

        buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
        if (!buffer) {
                kfree(urb_priv);
                return -ENOMEM;
        }

        for (i = 0; i < size; i++) {
                urb_priv->td[i] = buffer;
                buffer++;
        }

        urb_priv->length = size;
        urb_priv->td_cnt = 0;
        urb->hcpriv = urb_priv;

        if (usb_endpoint_xfer_control(&urb->ep->desc)) {
                /* Check to see if the max packet size for the default control
                 * endpoint changed during FS device enumeration
                 */
                if (urb->dev->speed == USB_SPEED_FULL) {
                        ret = xhci_check_maxpacket(xhci, slot_id,
                                        ep_index, urb);
                        if (ret < 0) {
                                xhci_urb_free_priv(xhci, urb_priv);
                                urb->hcpriv = NULL;
                                return ret;
                        }
                }

                /* We have a spinlock and interrupts disabled, so we must pass
                 * atomic context to this function, which may allocate memory.
                 */
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                if (xhci->devs[slot_id]->eps[ep_index].ep_state &
                                EP_GETTING_STREAMS) {
                        xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
                                        "is transitioning to using streams.\n");
                        ret = -EINVAL;
                } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
                                EP_GETTING_NO_STREAMS) {
                        xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
                                        "is transitioning to "
                                        "not having streams.\n");
                        ret = -EINVAL;
                } else {
                        ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                                        slot_id, ep_index);
                }
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                if (ret)
                        goto free_priv;
                spin_unlock_irqrestore(&xhci->lock, flags);
        }
exit:
        return ret;
dying:
        xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
                        "non-responsive xHCI host.\n",
                        urb->ep->desc.bEndpointAddress, urb);
        ret = -ESHUTDOWN;
free_priv:
        xhci_urb_free_priv(xhci, urb_priv);
        urb->hcpriv = NULL;
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
}
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
                struct urb *urb)
{
        unsigned int slot_id;
        unsigned int ep_index;
        unsigned int stream_id;
        struct xhci_virt_ep *ep;

        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        stream_id = urb->stream_id;
        ep = &xhci->devs[slot_id]->eps[ep_index];
        /* Common case: no streams */
        if (!(ep->ep_state & EP_HAS_STREAMS))
                return ep->ring;

        if (stream_id == 0) {
                xhci_warn(xhci,
                                "WARN: Slot ID %u, ep index %u has streams, "
                                "but URB has no stream ID.\n",
                                slot_id, ep_index);
                return NULL;
        }

        if (stream_id < ep->stream_info->num_streams)
                return ep->stream_info->stream_rings[stream_id];

        xhci_warn(xhci,
                        "WARN: Slot ID %u, ep index %u has "
                        "stream IDs 1 to %u allocated, "
                        "but stream ID %u is requested.\n",
                        slot_id, ep_index,
                        ep->stream_info->num_streams - 1,
                        stream_id);
        return NULL;
}
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
        unsigned long flags;
        int ret, i;
        u32 temp;
        struct xhci_hcd *xhci;
        struct urb_priv *urb_priv;
        struct xhci_td *td;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;

        xhci = hcd_to_xhci(hcd);
        spin_lock_irqsave(&xhci->lock, flags);
        /* Make sure the URB hasn't completed or been unlinked already */
        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (ret || !urb->hcpriv)
                goto done;
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg(xhci, "HW died, freeing TD.\n");
                urb_priv = urb->hcpriv;
                for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
                        td = urb_priv->td[i];
                        if (!list_empty(&td->td_list))
                                list_del_init(&td->td_list);
                        if (!list_empty(&td->cancelled_td_list))
                                list_del_init(&td->cancelled_td_list);
                }

                usb_hcd_unlink_urb_from_ep(hcd, urb);
                spin_unlock_irqrestore(&xhci->lock, flags);
                usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
                xhci_urb_free_priv(xhci, urb_priv);
                return ret;
        }
        if ((xhci->xhc_state & XHCI_STATE_DYING) ||
                        (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
                                "non-responsive xHCI host.\n",
                                urb->ep->desc.bEndpointAddress, urb);
                /* Let the stop endpoint command watchdog timer (which set this
                 * state) finish cleaning up the endpoint TD lists.  We must
                 * have caught it in the middle of dropping a lock and giving
                 * back an URB.
                 */
                goto done;
        }

        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
        if (!ep_ring) {
                ret = -EINVAL;
                goto done;
        }

        urb_priv = urb->hcpriv;
        i = urb_priv->td_cnt;
        if (i < urb_priv->length)
                xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
                                "starting at offset 0x%llx\n",
                                urb, urb->dev->devpath,
                                urb->ep->desc.bEndpointAddress,
                                (unsigned long long) xhci_trb_virt_to_dma(
                                        urb_priv->td[i]->start_seg,
                                        urb_priv->td[i]->first_trb));

        for (; i < urb_priv->length; i++) {
                td = urb_priv->td[i];
                list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
        }

        /* Queue a stop endpoint command, but only if this is
         * the first cancellation to be handled.
         */
        if (!(ep->ep_state & EP_HALT_PENDING)) {
                ep->ep_state |= EP_HALT_PENDING;
                ep->stop_cmds_pending++;
                ep->stop_cmd_timer.expires = jiffies +
                        XHCI_STOP_EP_CMD_TIMEOUT * HZ;
                add_timer(&ep->stop_cmd_timer);
                xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
                xhci_ring_cmd_db(xhci);
        }
done:
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx, *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_slot_ctx *slot_ctx;
        unsigned int last_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        u32 drop_flag;
        u32 new_add_flags, new_drop_flags, new_slot_info;
        int ret;

        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
        if (ret <= 0)
                return ret;
        xhci = hcd_to_xhci(hcd);
        if (xhci->xhc_state & XHCI_STATE_DYING)
                return -ENODEV;

        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
        drop_flag = xhci_get_endpoint_flag(&ep->desc);
        if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
                xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
                                __func__, drop_flag);
                return 0;
        }

        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        /* If the HC already knows the endpoint is disabled,
         * or the HCD has noted it is disabled, ignore this request
         */
        if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
             cpu_to_le32(EP_STATE_DISABLED)) ||
            le32_to_cpu(ctrl_ctx->drop_flags) &
            xhci_get_endpoint_flag(&ep->desc)) {
                xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
                                __func__, ep);
                return 0;
        }

        ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
        new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

        ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

        last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        /* Update the last valid endpoint context, if we deleted the last one */
        if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
            LAST_CTX(last_ctx)) {
                slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
                slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
        }
        new_slot_info = le32_to_cpu(slot_ctx->dev_info);

        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

        xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        udev->slot_id,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
        return 0;
}
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx, *out_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        u32 added_ctxs;
        unsigned int last_ctx;
        u32 new_add_flags, new_drop_flags, new_slot_info;
        struct xhci_virt_device *virt_dev;
        int ret = 0;

        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
        if (ret <= 0) {
                /* So we won't queue a reset ep command for a root hub */
                ep->hcpriv = NULL;
                return ret;
        }
        xhci = hcd_to_xhci(hcd);
        if (xhci->xhc_state & XHCI_STATE_DYING)
                return -ENODEV;

        added_ctxs = xhci_get_endpoint_flag(&ep->desc);
        last_ctx = xhci_last_valid_endpoint(added_ctxs);
        if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
                /* FIXME when we have to issue an evaluate endpoint command to
                 * deal with ep0 max packet size changing once we get the
                 * descriptors
                 */
                xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
                                __func__, added_ctxs);
                return 0;
        }

        virt_dev = xhci->devs[udev->slot_id];
        in_ctx = virt_dev->in_ctx;
        out_ctx = virt_dev->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);

        /* If this endpoint is already in use, and the upper layers are trying
         * to add it again without dropping it, reject the addition.
         */
        if (virt_dev->eps[ep_index].ring &&
                        !(le32_to_cpu(ctrl_ctx->drop_flags) &
                                xhci_get_endpoint_flag(&ep->desc))) {
                xhci_warn(xhci, "Trying to add endpoint 0x%x "
                                "without dropping it.\n",
                                (unsigned int) ep->desc.bEndpointAddress);
                return -EINVAL;
        }

        /* If the HCD has already noted the endpoint is enabled,
         * ignore this request.
         */
        if (le32_to_cpu(ctrl_ctx->add_flags) &
            xhci_get_endpoint_flag(&ep->desc)) {
                xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
                                __func__, ep);
                return 0;
        }

        /*
         * Configuration and alternate setting changes must be done in
         * process context, not interrupt context (or so documentation
         * for usb_set_interface() and usb_set_configuration() claim).
         */
        if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
                dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
                                __func__, ep->desc.bEndpointAddress);
                return -ENOMEM;
        }

        ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
        new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

        /* If xhci_endpoint_disable() was called for this endpoint, but the
         * xHC hasn't been notified yet through the check_bandwidth() call,
         * this re-adds a new state for the endpoint from the new endpoint
         * descriptors.  We must drop and re-add this endpoint, so we leave the
         * drop flags alone.
         */
        new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        /* Update the last valid endpoint context, if we just added one past */
        if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
            LAST_CTX(last_ctx)) {
                slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
                slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
        }
        new_slot_info = le32_to_cpu(slot_ctx->dev_info);

        /* Store the usb_device pointer for later use */
        ep->hcpriv = udev;

        xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        udev->slot_id,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
        return 0;
}
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;
        int i;

        /* When a device's add flag and drop flag are zero, any subsequent
         * configure endpoint command will leave that endpoint's state
         * untouched.  Make sure we don't leave any old state in the input
         * endpoint contexts.
         */
        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx->drop_flags = 0;
        ctrl_ctx->add_flags = 0;
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
        slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
        /* Endpoint 0 is always valid */
        slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
        for (i = 1; i < 31; ++i) {
                ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
                ep_ctx->ep_info = 0;
                ep_ctx->ep_info2 = 0;
                ep_ctx->deq = 0;
                ep_ctx->tx_info = 0;
        }
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
                struct usb_device *udev, u32 *cmd_status)
{
        int ret;

        switch (*cmd_status) {
        case COMP_ENOMEM:
                dev_warn(&udev->dev, "Not enough host controller resources "
                                "for new device state.\n");
                ret = -ENOMEM;
                /* FIXME: can we allocate more resources for the HC? */
                break;
        case COMP_BW_ERR:
        case COMP_2ND_BW_ERR:
                dev_warn(&udev->dev, "Not enough bandwidth "
                                "for new device state.\n");
                ret = -ENOSPC;
                /* FIXME: can we go back to the old state? */
                break;
        case COMP_TRB_ERR:
                /* the HCD set up something wrong */
                dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
                                "add flag = 1, "
                                "and endpoint is not disabled.\n");
                ret = -EINVAL;
                break;
        case COMP_DEV_ERR:
                dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
                                "configure command.\n");
                ret = -ENODEV;
                break;
        case COMP_SUCCESS:
                dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
                ret = 0;
                break;
        default:
                xhci_err(xhci, "ERROR: unexpected command completion "
                                "code 0x%x.\n", *cmd_status);
                ret = -EINVAL;
                break;
        }
        return ret;
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
                struct usb_device *udev, u32 *cmd_status)
{
        int ret;
        struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

        switch (*cmd_status) {
        case COMP_EINVAL:
                dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
                                "context command.\n");
                ret = -EINVAL;
                break;
        case COMP_EBADSLT:
                dev_warn(&udev->dev, "WARN: slot not enabled for "
                                "evaluate context command.\n");
                ret = -EINVAL;
                break;
        case COMP_CTX_STATE:
                dev_warn(&udev->dev, "WARN: invalid context state for "
                                "evaluate context command.\n");
                xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
                ret = -EINVAL;
                break;
        case COMP_DEV_ERR:
                dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
                                "context command.\n");
                ret = -ENODEV;
                break;
        case COMP_MEL_ERR:
                /* Max Exit Latency too large error */
                dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
                ret = -EINVAL;
                break;
        case COMP_SUCCESS:
                dev_dbg(&udev->dev, "Successful evaluate context command\n");
                ret = 0;
                break;
        default:
                xhci_err(xhci, "ERROR: unexpected command completion "
                                "code 0x%x.\n", *cmd_status);
                ret = -EINVAL;
                break;
        }
        return ret;
}
static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx)
{
        struct xhci_input_control_ctx *ctrl_ctx;
        u32 valid_add_flags;
        u32 valid_drop_flags;

        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        /* Ignore the slot flag (bit 0), and the default control endpoint flag
         * (bit 1).  The default control endpoint is added during the Address
         * Device command and is never removed until the slot is disabled.
         */
        valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
        valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

        /* Use hweight32 to count the number of ones in the add flags, or
         * number of endpoints added.  Don't count endpoints that are changed
         * (both added and dropped).
         */
        return hweight32(valid_add_flags) -
                hweight32(valid_add_flags & valid_drop_flags);
}
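/*
 * Worked example (illustrative): add_flags = 0x18 (context flags for ep
 * indexes 2 and 3) and drop_flags = 0x8 (ep index 2 is both dropped and
 * re-added, i.e. changed) give valid_add_flags = 0x6 and
 * valid_drop_flags = 0x2, so hweight32(0x6) - hweight32(0x6 & 0x2)
 * = 2 - 1 = 1 truly new endpoint.
 */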
static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx)
{
        struct xhci_input_control_ctx *ctrl_ctx;
        u32 valid_add_flags;
        u32 valid_drop_flags;

        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
        valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

        return hweight32(valid_drop_flags) -
                hweight32(valid_add_flags & valid_drop_flags);
}
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx)
{
        u32 added_eps;

        added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
        if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
                xhci_dbg(xhci, "Not enough ep ctxs: "
                                "%u active, need to add %u, limit is %u.\n",
                                xhci->num_active_eps, added_eps,
                                xhci->limit_active_eps);
                return -ENOMEM;
        }
        xhci->num_active_eps += added_eps;
        xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
                        xhci->num_active_eps);
        return 0;
}
/*
 * The configure endpoint was failed by the xHC for some other reason, so we
 * need to revert the resources that failed configuration would have used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx)
{
        u32 num_failed_eps;

        num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
        xhci->num_active_eps -= num_failed_eps;
        xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
                        num_failed_eps,
                        xhci->num_active_eps);
}
/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx)
{
        u32 num_dropped_eps;

        num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
        xhci->num_active_eps -= num_dropped_eps;
        if (num_dropped_eps)
                xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
                                num_dropped_eps,
                                xhci->num_active_eps);
}
unsigned int xhci_get_block_size(struct usb_device *udev)
{
        switch (udev->speed) {
        case USB_SPEED_LOW:
        case USB_SPEED_FULL:
                return FS_BLOCK;
        case USB_SPEED_HIGH:
                return HS_BLOCK;
        case USB_SPEED_SUPER:
                return SS_BLOCK;
        case USB_SPEED_UNKNOWN:
        case USB_SPEED_WIRELESS:
        default:
                /* Should never happen */
                return 1;
        }
}
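/*
 * Note (illustrative): the block size returned here is the unit the
 * interval bandwidth table entries are converted into; for example,
 * xhci_check_bw_table() below turns a max packet size into blocks with
 * DIV_ROUND_UP(max_packet_size, block_size).
 */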
unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
        if (interval_bw->overhead[LS_OVERHEAD_TYPE])
                return LS_OVERHEAD;
        if (interval_bw->overhead[FS_OVERHEAD_TYPE])
                return FS_OVERHEAD;
        return HS_OVERHEAD;
}
/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev,
                int old_active_eps)
{
        struct xhci_interval_bw_table *bw_table;
        struct xhci_tt_bw_info *tt_info;

        /* Find the bandwidth table for the root port this TT is attached to. */
        bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
        tt_info = virt_dev->tt_info;
        /* If this TT already had active endpoints, the bandwidth for this TT
         * has already been added.  Removing all periodic endpoints (and thus
         * making the TT inactive) will only decrease the bandwidth used.
         */
        if (old_active_eps)
                return 0;
        if (old_active_eps == 0 && tt_info->active_eps != 0) {
                if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
                        return -ENOMEM;
                return 0;
        }
        /* Not sure why we would have no new active endpoints...
         *
         * Maybe because of an Evaluate Context change for a hub update or a
         * control endpoint 0 max packet size change?
         * FIXME: skip the bandwidth calculation in that case.
         */
        return 0;
}
static int xhci_check_ss_bw(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev)
{
        unsigned int bw_reserved;

        bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
        if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
                return -ENOMEM;

        bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
        if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
                return -ENOMEM;

        return 0;
}
/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling.  This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP complete problem to find the minimum worst
 * case scenario.  Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * over-estimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only one
 * possible way to schedule packets for that interval.  In order to simplify
 * this algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, we obviously must schedule all packets for each interval.
 * The bandwidth for interval 0 is just the amount of data to be transmitted
 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
 * the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in.  For this algorithm, if we can schedule the same number of packets for
 * each possible scheduling opportunity (each microframe), we will do so.  The
 * remaining number of packets will be saved to be transmitted in the gaps in
 * the next interval's scheduling sequence.
 *
 * As we move those remaining packets to be scheduled with interval 2 packets,
 * we have to double the number of remaining packets to transmit.  This is
 * because the intervals are actually powers of 2, and we would be transmitting
 * the previous interval's packets twice in this interval.  We also have to be
 * sure that when we look at the largest max packet size for this interval, we
 * also look at the largest max packet size for the remaining packets and take
 * the greater of the two.
 *
 * The algorithm continues to evenly distribute packets in each scheduling
 * opportunity, and push the remaining packets out, until we get to the last
 * interval.  Then those packets and their associated overhead are just added
 * to the bandwidth used.
 */
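
/* Worked example for the loop below (illustrative numbers, not from the
 * spec): suppose 7 packets are pending at table index i = 1.  There are
 * (1 << (i + 1)) = 4 scheduling opportunities, so 7 >> 2 = 1 packet is
 * scheduled per opportunity, bw_added covers that one packet's overhead plus
 * max packet size, and 7 % 4 = 3 packets carry over.  At i = 2 the carry
 * doubles to 6 before interval 2's own packets are added.
 */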
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed == USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/* How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used for those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* What largest max packet size should those packets have? */
		/* If we've transmitted all packets, don't carry over the
		 * largest packet size.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			/* Otherwise if we do have remaining packets, and we've
			 * scheduled some packets in this interval, take the
			 * largest max packet size from endpoints with this
			 * interval.
			 */
			packet_size = largest_mps;
			overhead = interval_overhead;
		}
		/* Otherwise carry over packet_size and overhead from the last
		 * time we had a remainder.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. "
					"Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}
	/*
	 * Ok, we know we have some packets left over after even-handedly
	 * scheduling interval 15.  We don't know which microframes they will
	 * fit into, so we over-schedule and say they will be scheduled every
	 * microframe.
	 */
	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		unsigned int port_index = virt_dev->real_port - 1;

		/* OK, we're manipulating a HS device attached to a
		 * root port bandwidth domain.  Include the number of active TTs
		 * in the bandwidth used.
		 */
		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[port_index].num_active_tts;
	}

	xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
			"Available: %u percent\n",
			bw_used, max_bandwidth, bw_reserved,
			(max_bandwidth - bw_used - bw_reserved) * 100 /
			max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}
static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP);
}
static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}
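
/* Rough model of the calculation below: the cost of one service interval's
 * worth of packets (mult * num_packets, each costing SS_OVERHEAD plus the
 * max packet size rounded up to SS_BLOCKs, plus burst overhead) is averaged
 * over the 2^ep_interval microframes between services.  Interval 0 services
 * every microframe, so its cost is charged in full.
 */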
static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
					(SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);
}
void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* SuperSpeed endpoints never get added to intervals in the table, so
	 * this check is only valid for HS/FS/LS devices.
	 */
	if (list_empty(&virt_ep->bw_endpoint_list))
		return;
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}
	if (tt_info)
		tt_info->active_eps -= 1;
	list_del_init(&virt_ep->bw_endpoint_list);
}
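
/* Mirror image of xhci_drop_ep_from_interval_table(): account for a newly
 * added periodic endpoint.  The same interval normalization applies; for
 * example, a full-speed interrupt endpoint with ep_interval 6 (2^6 = 64
 * microframes, i.e. 8 frames) lands in interval_bw[3], since FS/LS entries
 * are kept in frames (2^3 = 8).
 */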
static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	struct xhci_virt_ep *smaller_ep;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets += ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}

	if (tt_info)
		tt_info->active_eps += 1;
	/* Insert the endpoint into the list, largest max packet size first. */
	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
		if (ep_bw->max_packet_size >=
				smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
			list_add_tail(&virt_ep->bw_endpoint_list,
					&smaller_ep->bw_endpoint_list);
			return;
		}
	}
	/* Add the new endpoint at the end of the list. */
	list_add_tail(&virt_ep->bw_endpoint_list,
			&interval_bw->endpoints);
}
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_root_port_bw_info *rh_bw_info;
	if (!virt_dev->tt_info)
		return;

	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
	if (old_active_eps == 0 &&
			virt_dev->tt_info->active_eps != 0) {
		rh_bw_info->num_active_tts += 1;
		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
	} else if (old_active_eps != 0 &&
			virt_dev->tt_info->active_eps == 0) {
		rh_bw_info->num_active_tts -= 1;
		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
	}
}
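
/* Tentatively apply the endpoint changes described by the input context to
 * the bandwidth interval tables, then run the bandwidth check.  On failure,
 * every change is rolled back from the per-endpoint copies saved in
 * ep_bw_info[], leaving the tables exactly as they were before the call.
 */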
static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_bw_info ep_bw_info[31];
	int i;
	struct xhci_input_control_ctx *ctrl_ctx;
	int old_active_eps = 0;

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);

	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Make a copy of the BW info in case we need to revert this */
		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
				sizeof(ep_bw_info[i]));
		/* Drop the endpoint from the interval table if the endpoint is
		 * being dropped or changed.
		 */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	/* Overwrite the information stored in the endpoints' bw_info */
	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
	for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
		if (EP_IS_ADDED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}

	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* Ok, this fits in the bandwidth we have.
		 * Update the number of active TTs.
		 */
		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
		return 0;
	}

	/* We don't have enough bandwidth for this, revert the stored info. */
	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
		if (EP_IS_ADDED(ctrl_ctx, i)) {
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		}
		/* Revert the endpoint back to its old information */
		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
				sizeof(ep_bw_info[i]));
		/* Add any changed or dropped endpoints back into the table */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	return -ENOMEM;
}
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
	u32 *cmd_status;
	struct xhci_virt_device *virt_dev;
	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];

	if (command)
		in_ctx = command->in_ctx;
	else
		in_ctx = virt_dev->in_ctx;

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
			xhci_reserve_host_resources(xhci, in_ctx)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, "
				"active endpoint contexts = %u\n",
				xhci->num_active_eps);
		return -ENOMEM;
	}
	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough bandwidth\n");
		return -ENOMEM;
	}

	if (command) {
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci->cmd_ring->enqueue;

		/* Enqueue pointer can be left pointing to the link TRB,
		 * we must handle that
		 */
		if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
			command->command_trb =
				xhci->cmd_ring->enq_seg->next->trbs;

		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	cmd_trb = xhci->cmd_ring->dequeue;
	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* cancel the configure endpoint command */
		ret = xhci_cancel_cmd(xhci, command, cmd_trb);
		if (ret < 0)
			return ret;
		return -ETIME;
	}

	if (!ctx_change)
		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
	else
		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, in_ctx);
		else
			xhci_finish_resource_reservation(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}
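
/* Note that callers may pass command == NULL (as xhci_check_bandwidth()
 * below does), in which case the virt_dev's input context, completion, and
 * status are used in place of a separately allocated command structure.
 */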
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Don't issue the command if there's no endpoints to update. */
	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
			ctrl_ctx->drop_flags == 0)
		return 0;

	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Callee should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}
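
/* Reminder on the input control context layout: bit 0 of add_flags and
 * drop_flags refers to the slot context and bit 1 to endpoint 0, so endpoint
 * index i (0..30) maps to flag bit (i + 1) -- which is why the loops in
 * xhci_check_bandwidth() test (1 << (i + 1)).
 */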
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		u32 add_flags, u32 drop_flags)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint())
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers. xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}
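
/* For example, a request for 23 usable streams arrives here as *num_streams
 * = 24 (stream 0 included), which rounds up to a 32-entry stream context
 * array; if HCC_MAX_PSA reports fewer entries, both values are clamped.
 */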
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams + 1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already.",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}

		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}
/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
 * coordinate mass storage command queueing across multiple endpoints (basically
 * a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array. FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
				udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}
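
/* A driver that got fewer stream IDs than requested may only use IDs 1
 * through the returned count; stream 0 always belongs to the xHCI driver.
 * The matching teardown is xhci_free_streams() below, called on the same
 * group of endpoints.
 */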
/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}
/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.\n",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}
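
/* This accounting only matters on hosts with XHCI_EP_LIMIT_QUIRK, which
 * track active endpoint contexts in xhci->num_active_eps; the caller below
 * checks the quirk before invoking this.
 */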
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev does not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
						SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;

	/* Enqueue pointer can be left pointing to the link TRB,
	 * we must handle that
	 */
	if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
		reset_device_cmd->command_trb =
			xhci->cmd_ring->enq_seg->next->trbs;

	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_info(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			last_freed_endpoint = i;
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
	}
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);

	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free the
	 * device.
	 */
	if (ret <= 0 && ret != -ENODEV)
		return;

	virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	if (udev->usb2_hw_lpm_enabled) {
		xhci_set_usb2_hardware_lpm(hcd, udev, 0);
		udev->usb2_hw_lpm_enabled = 0;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.  XXX Can free sleep?
	 */
}
/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.\n",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
			xhci->num_active_eps);
	return 0;
}
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;
	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	cmd_trb = xhci->cmd_ring->dequeue;
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* cancel the enable slot request */
		return xhci_cancel_cmd(xhci, NULL, cmd_trb);
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */

	return 1;

disable_slot:
	/* Disable slot, if we can do it without mem alloc */
	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
		xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;
	union xhci_trb *cmd_trb;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture test with an NEC controller,
		 * a zero-dereference was observed once due to virt_dev = 0.
		 * Print useful debug rather than crash if it is observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
				udev->slot_id);
		return -EINVAL;
	}

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	cmd_trb = xhci->cmd_ring->dequeue;
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
					udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			XHCI_CMD_DEFAULT_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for address device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* cancel the address device command */
		ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
		if (ret < 0)
			return ret;
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for address "
				"device command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	/* Use kernel assigned address for devices; store xHC assigned
	 * address locally. */
	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
		+ 1;
	/* Zero the input context control for later use */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);

	return 0;
}
#ifdef CONFIG_USB_SUSPEND

/* BESL to HIRD Encoding array for USB2 LPM */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};

/* Calculate HIRD/BESL for USB2 PORTPMSC */
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
					struct usb_device *udev)
{
	int u2del, besl, besl_host;
	int besl_device = 0;
	u32 field;

	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	if (field & USB_BESL_SUPPORT) {
		for (besl_host = 0; besl_host < 16; besl_host++) {
			if (xhci_besl_encoding[besl_host] >= u2del)
				break;
		}
		/* Use baseline BESL value as default */
		if (field & USB_BESL_BASELINE_VALID)
			besl_device = USB_GET_BESL_BASELINE(field);
		else if (field & USB_BESL_DEEP_VALID)
			besl_device = USB_GET_BESL_DEEP(field);
	} else {
		if (u2del <= 50)
			besl_host = 0;
		else
			besl_host = (u2del - 51) / 75 + 1;
	}

	besl = besl_host + besl_device;
	if (besl > 15)
		besl = 15;

	return besl;
}
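
/* Worked example for the fallback path above: with u2del = 300 microseconds
 * and no BESL support, besl_host = (300 - 51) / 75 + 1 = 4, which selects
 * the 400 microsecond entry of xhci_besl_encoding[] and so covers the
 * host's U2 exit latency with some margin.
 */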
static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
			struct usb_device *udev)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	struct dev_info	*dev_info;
	__le32 __iomem	**port_array;
	__le32 __iomem	*addr, *pm_addr;
	u32		temp, dev_id;
	unsigned int	port_num;
	unsigned long	flags;
	int		hird;
	int		ret;

	if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
			!udev->lpm_capable)
		return -EINVAL;

	/* we only support lpm for non-hub device connected to root hub yet */
	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EINVAL;

	spin_lock_irqsave(&xhci->lock, flags);

	/* Look for devices in lpm_failed_devs list */
	dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
			le16_to_cpu(udev->descriptor.idProduct);
	list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
		if (dev_info->dev_id == dev_id) {
			ret = -EINVAL;
			goto finish;
		}
	}

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;

	if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
		ret = -EINVAL;
		goto finish;
	}

	/*
	 * Test USB 2.0 software LPM.
	 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
	 * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
	 * in the June 2011 errata release.
	 */
	xhci_dbg(xhci, "test port %d software LPM\n", port_num);
	/*
	 * Set L1 Device Slot and HIRD/BESL.
	 * Check device's USB 2.0 extension descriptor to determine whether
	 * HIRD or BESL should be used. See USB2.0 LPM errata.
	 */
	pm_addr = port_array[port_num] + 1;
	hird = xhci_calculate_hird_besl(xhci, udev);
	temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
	xhci_writel(xhci, temp, pm_addr);

	/* Set port link state to U2(L1) */
	addr = port_array[port_num];
	xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);

	/* wait for ACK */
	spin_unlock_irqrestore(&xhci->lock, flags);
	msleep(10);
	spin_lock_irqsave(&xhci->lock, flags);

	/* Check L1 Status */
	ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
	if (ret != -ETIMEDOUT) {
		/* enter L1 successfully */
		temp = xhci_readl(xhci, addr);
		xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
				port_num, temp);
		ret = 0;
	} else {
		temp = xhci_readl(xhci, pm_addr);
		xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
				port_num, temp & PORT_L1S_MASK);
		ret = -EINVAL;
	}

	/* Resume the port */
	xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);

	spin_unlock_irqrestore(&xhci->lock, flags);
	msleep(10);
	spin_lock_irqsave(&xhci->lock, flags);

	/* Clear PLC */
	xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);

	/* Check PORTSC to make sure the device is in the right state */
	if (!ret) {
		temp = xhci_readl(xhci, addr);
		xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
		if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
				(temp & PORT_PLS_MASK) != XDEV_U0) {
			xhci_dbg(xhci, "port L1 resume fail\n");
			ret = -EINVAL;
		}
	}

	if (ret) {
		/* Insert dev to lpm_failed_devs list */
		xhci_warn(xhci, "device LPM test failed, may disconnect and "
				"re-enumerate\n");
		dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
		if (!dev_info) {
			ret = -ENOMEM;
			goto finish;
		}
		dev_info->dev_id = dev_id;
		INIT_LIST_HEAD(&dev_info->list);
		list_add(&dev_info->list, &xhci->lpm_failed_devs);
	} else {
		xhci_ring_device(xhci, udev->slot_id);
	}

finish:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
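/*
 * Worked example with illustrative values: for slot_id = 5 and hird = 6,
 * the PORTPMSC write above is PORT_L1DS(5) | PORT_HIRD(6).  The L1 Device
 * Slot field lives in bits 15:8 and HIRD/BESL in bits 7:4 of PORTPMSC
 * (xHCI section 5.4.9), so the value written is (5 << 8) | (6 << 4) = 0x560.
 */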
int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	__le32 __iomem	**port_array;
	__le32 __iomem	*pm_addr;
	u32		temp;
	unsigned int	port_num;
	unsigned long	flags;
	int		hird;

	if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
			!udev->lpm_capable)
		return -EPERM;

	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EPERM;

	if (udev->usb2_hw_lpm_capable != 1)
		return -EPERM;

	spin_lock_irqsave(&xhci->lock, flags);

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;
	pm_addr = port_array[port_num] + 1;
	temp = xhci_readl(xhci, pm_addr);

	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num);

	hird = xhci_calculate_hird_besl(xhci, udev);

	if (enable) {
		temp &= ~PORT_HIRD_MASK;
		temp |= PORT_HIRD(hird) | PORT_RWE;
		xhci_writel(xhci, temp, pm_addr);
		temp = xhci_readl(xhci, pm_addr);
		temp |= PORT_HLE;
		xhci_writel(xhci, temp, pm_addr);
	} else {
		temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
		xhci_writel(xhci, temp, pm_addr);
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
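/*
 * Note on the enable path above: HIRD and RWE are written first, and HLE
 * (Hardware LPM Enable) is set with a second read-modify-write, presumably
 * so the host never initiates LPM with a stale HIRD value in PORTPMSC.
 */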
int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	int		ret;

	ret = xhci_usb2_software_lpm_test(hcd, udev);
	if (!ret) {
		xhci_dbg(xhci, "software LPM test succeeded\n");
		if (xhci->hw_lpm_support == 1) {
			udev->usb2_hw_lpm_capable = 1;
			ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
			if (!ret)
				udev->usb2_hw_lpm_enabled = 1;
		}
	}

	return 0;
}

#else /* CONFIG_USB_SUSPEND */

int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
				struct usb_device *udev, int enable)
{
	return 0;
}

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

#endif /* CONFIG_USB_SUSPEND */
/*---------------------- USB 3.0 Link PM functions ------------------------*/

#ifdef CONFIG_PM
/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
static unsigned long long xhci_service_interval_to_ns(
		struct usb_endpoint_descriptor *desc)
{
	/* Shift as ULL so a large bInterval can't overflow a 32-bit int */
	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
}
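/*
 * For example, a periodic endpoint with bInterval = 4 has a service
 * interval of 2^(4-1) * 125 us = 1 ms, which this helper returns as
 * 8 * 125 * 1000 = 1,000,000 ns.
 */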
static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
		enum usb3_link_state state)
{
	unsigned long long sel;
	unsigned long long pel;
	unsigned int max_sel_pel;
	char *state_name;

	switch (state) {
	case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
		state_name = "U1";
		break;
	case USB3_LPM_U2:
		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
		state_name = "U2";
		break;
	default:
		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
				__func__);
		return USB3_LPM_DISABLED;
	}

	if (sel <= max_sel_pel && pel <= max_sel_pel)
		return USB3_LPM_DEVICE_INITIATED;

	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long SEL %llu us\n",
				state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long PEL %llu us\n",
				state_name, pel);
	return USB3_LPM_DISABLED;
}
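/*
 * Example with made-up latencies: if udev->u1_params.sel = 2100 ns and
 * udev->u1_params.pel = 900 ns, they round up to sel = 3 us and pel = 1 us.
 * Device-initiated U1 stays available only if both fit under
 * USB3_LPM_MAX_U1_SEL_PEL; otherwise the dev_dbg branches above report
 * which of the two (in microseconds) was too long.
 */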
/* Returns the hub-encoded U1 timeout value.
 * The U1 timeout should be the maximum of the following values:
 *  - For control endpoints, U1 system exit latency (SEL) * 3
 *  - For bulk endpoints, U1 SEL * 5
 *  - For interrupt endpoints:
 *    - Notification EPs, U1 SEL * 3
 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
 */
static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	int ep_type;
	int intr_type;

	ep_type = usb_endpoint_type(desc);
	switch (ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		timeout_ns = udev->u1_params.sel * 3;
		break;
	case USB_ENDPOINT_XFER_BULK:
		timeout_ns = udev->u1_params.sel * 5;
		break;
	case USB_ENDPOINT_XFER_INT:
		intr_type = usb_endpoint_interrupt_type(desc);
		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
			timeout_ns = udev->u1_params.sel * 3;
			break;
		}
		/* Otherwise the calculation is the same as isoc eps */
	case USB_ENDPOINT_XFER_ISOC:
		timeout_ns = xhci_service_interval_to_ns(desc);
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
		if (timeout_ns < udev->u1_params.sel * 2)
			timeout_ns = udev->u1_params.sel * 2;
		break;
	default:
		return 0;
	}

	/* The U1 timeout is encoded in 1us intervals. */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
	/* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */
	if (timeout_ns == USB3_LPM_DISABLED)
		timeout_ns++;

	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U1.
	 */
	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
			"due to long timeout %llu us\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}
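/*
 * Worked example (illustrative SEL): a bulk endpoint on a device with
 * u1_params.sel = 400 ns gets timeout_ns = 400 * 5 = 2000 ns, which the
 * 1 us encoding turns into DIV_ROUND_UP_ULL(2000, 1000) = 2.
 */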
/* Returns the hub-encoded U2 timeout value.
 * The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
 *  - largest bInterval of any active periodic endpoint (to avoid going
 *    into lower power link states between intervals).
 *  - the U2 Exit Latency of the device
 */
static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	unsigned long long u2_del_ns;

	timeout_ns = 10 * 1000 * 1000;

	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
			(xhci_service_interval_to_ns(desc) > timeout_ns))
		timeout_ns = xhci_service_interval_to_ns(desc);

	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
	if (u2_del_ns > timeout_ns)
		timeout_ns = u2_del_ns;

	/* The U2 timeout is encoded in 256us intervals */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U2.
	 */
	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
			"due to long timeout %llu (in 256 us units)\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
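/*
 * Worked example: an interrupt endpoint with bInterval = 8 has a service
 * interval of 2^7 * 125 us = 16 ms, which beats the 10 ms floor.  Encoded
 * in 256 us units, that is DIV_ROUND_UP_ULL(16000000, 256000) = 63.
 */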
static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	if (state == USB3_LPM_U1) {
		if (xhci->quirks & XHCI_INTEL_HOST)
			return xhci_calculate_intel_u1_timeout(udev, desc);
	} else {
		if (xhci->quirks & XHCI_INTEL_HOST)
			return xhci_calculate_intel_u2_timeout(udev, desc);
	}

	return USB3_LPM_DISABLED;
}
static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	u16 alt_timeout;

	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
			desc, state, timeout);

	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, just stop searching.
	 */
	if (alt_timeout == USB3_LPM_DISABLED ||
			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
		*timeout = alt_timeout;
		return -E2BIG;
	}
	if (alt_timeout > *timeout)
		*timeout = alt_timeout;
	return 0;
}
static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,
		enum usb3_link_state state,
		u16 *timeout)
{
	int j;

	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
					&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
	}
	return 0;
}
static int xhci_check_intel_tier_policy(struct usb_device *udev,
		enum usb3_link_state state)
{
	struct usb_device *parent;
	unsigned int num_hubs;

	if (state == USB3_LPM_U2)
		return 0;

	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
	for (parent = udev->parent, num_hubs = 0; parent->parent;
			parent = parent->parent)
		num_hubs++;

	if (num_hubs < 2)
		return 0;

	dev_dbg(&udev->dev, "Disabling U1 link state for device"
			" below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub "
			"to decrease power consumption.\n");
	return -E2BIG;
}
static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (xhci->quirks & XHCI_INTEL_HOST)
		return xhci_check_intel_tier_policy(udev, state);
	return -EINVAL;
}
/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < USB_MAXINTERFACES; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
						"at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}
/*
 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
 * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
 */
static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
			struct usb_device *udev, u16 max_exit_latency)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}

	/* Attempt to issue an Evaluate Context command to change the MEL. */
	virt_dev = xhci->devs[udev->slot_id];
	command = xhci->lpm_command;
	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);

	xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
	xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, command->in_ctx, 0);

	/* Issue and wait for the evaluate context command. */
	ret = xhci_configure_endpoint(xhci, udev, command,
			true, true);
	xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);

	if (!ret) {
		spin_lock_irqsave(&xhci->lock, flags);
		virt_dev->current_mel = max_exit_latency;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}
static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;
	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
				"is too big.\n", mel_us);
		return -E2BIG;
	}
	return mel_us;
}
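/*
 * Example with made-up exit latencies: if the U1 MEL is 3500 ns and the U2
 * MEL is 2000 ns and both states end up enabled, the values round up to
 * 4 us and 2 us, and mel_us = 4 is what gets programmed into the slot
 * context.  Anything over MAX_EXIT (the 16-bit Max Exit Latency field) is
 * rejected with -E2BIG.
 */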
/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd	*xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd	*xhci;
	u16 mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return 0;
}
#else /* CONFIG_PM */

int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}

#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * high-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
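/*
 * TT think time, concretely: usb_tt::think_time is stored by the USB core
 * in nanoseconds, at 666 ns per "8 FS bit times" step.  A hub reporting
 * 16 FS bit times has think_time = 1332, and (1332 / 666) - 1 = 1, which
 * is exactly the encoding the slot context expects.
 */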
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why? */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
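/*
 * MFINDEX counts 125 us microframes, so shifting right by 3 divides by 8
 * and yields the 1 ms frame number the USB core expects from get_frame.
 */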
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd		*xhci;
	struct device		*dev = hcd->self.controller;
	int			retval;
	u32			temp;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;
	/* XHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
		if (!xhci)
			return -ENOMEM;
		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		xhci = hcd_to_xhci(hcd);
		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
		if (HCC_64BIT_ADDR(temp)) {
			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
		} else {
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
		}
		return 0;
	}

	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	get_quirks(dev, xhci);

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	if (HCC_64BIT_ADDR(temp)) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
	} else {
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	return 0;
}
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}
	retval = xhci_register_plat();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering platform driver.\n");
		goto unreg_pci;
	}
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	return 0;
unreg_pci:
	xhci_unregister_pci();
	return retval;
}
module_init(xhci_hcd_init);
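/*
 * The size checks above read as "fields * 32 bits / 8 bits-per-byte":
 * e.g. struct xhci_slot_ctx is eight 32-bit fields, so 8*32/8 = 32 bytes,
 * and xhci_run_regs embeds 128 interrupter register sets, giving
 * (8 + 8*128)*32/8 = 4128 bytes.  Any padding the compiler inserted would
 * break hardware access and fail the build here.
 */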
static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
	xhci_unregister_plat();
}
module_exit(xhci_hcd_cleanup);
);