/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					unsigned int cycle_state, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= TRB_CYCLE;
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg);
}
static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
				struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
				(type == TYPE_ISOC &&
				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;
	if (ring->first_seg)
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	kfree(ring);
}
static void xhci_initialize_ring_info(struct xhci_ring *ring,
					unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purpose
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
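/*
 * Worked example (illustrative note, not part of the original source):
 * assuming TRBS_PER_SEGMENT is 64, a newly initialized single-segment
 * ring starts with
 *
 *	num_trbs_free = 1 * (64 - 1) - 1 = 62
 *
 * one TRB per segment is consumed by the link TRB, and one more is held
 * back so software can distinguish a full ring from an empty one.
 */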
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment	*next;

		next = xhci_segment_alloc(xhci, cycle_state, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}
/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	return ring;

fail:
	kfree(ring);
	return NULL;
}
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
			struct xhci_ring *ring, unsigned int cycle_state,
			enum xhci_ring_type type)
{
	struct xhci_segment *seg = ring->first_seg;
	int i;

	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		if (cycle_state == 0) {
			for (i = 0; i < TRBS_PER_SEGMENT; i++)
				seg->trbs[i].link.control |= TRB_CYCLE;
		}
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, type);
		seg = seg->next;
	} while (seg != ring->first_seg);
	ring->type = type;
	xhci_initialize_ring_info(ring, cycle_state);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}
/*
 * Expand an existing ring.
 * Look for a cached ring or allocate a new ring which has same segment numbers
 * and link the two rings.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
				unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment	*first;
	struct xhci_segment	*last;
	unsigned int		num_segs;
	unsigned int		num_segs_needed;
	int			ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate number of segments we needed, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type, flags);
	if (ret)
		return -ENOMEM;

	xhci_link_rings(xhci, ring, first, last, num_segs);
	xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
			ring->num_segs);

	return 0;
}
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}
static void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}
struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
					      struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
				struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
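/*
 * Illustrative sketch of the container layout assumed by the accessors
 * above (not part of the original source).  With 32-byte contexts, an
 * input container is laid out as:
 *
 *	ctx->bytes + 0		input control context
 *	ctx->bytes + 32		slot context
 *	ctx->bytes + 64		endpoint 0 context	(ep_index 0)
 *	ctx->bytes + 96		endpoint 1 OUT context	(ep_index 1)
 *	...
 *
 * which is why xhci_get_ep_ctx() bumps ep_index once to skip the slot
 * context, and once more for input contexts to skip the control context.
 */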
/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				stream_ctx, dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		return dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}
/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				dma, mem_flags);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}
struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}
/* Only use this when you know stream_info is valid */
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static struct xhci_ring *dma_to_stream_ring(
		struct xhci_stream_info *stream_info,
		u64 address)
{
	return radix_tree_lookup(&stream_info->trb_address_map,
			address >> TRB_SEGMENT_SHIFT);
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static int xhci_test_radix_tree(struct xhci_hcd *xhci,
		unsigned int num_streams,
		struct xhci_stream_info *stream_info)
{
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;

	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		struct xhci_ring *mapped_ring;
		int trb_size = sizeof(union xhci_trb);

		cur_ring = stream_info->stream_rings[cur_stream];
		for (addr = cur_ring->first_seg->dma;
				addr < cur_ring->first_seg->dma + TRB_SEGMENT_SIZE;
				addr += trb_size) {
			mapped_ring = dma_to_stream_ring(stream_info, addr);
			if (cur_ring != mapped_ring) {
				xhci_warn(xhci, "WARN: DMA address 0x%08llx "
						"didn't map to stream ID %u; "
						"mapped to ring %p\n",
						(unsigned long long) addr,
						cur_stream, mapped_ring);
				return -EINVAL;
			}
		}
		/* One TRB after the end of the ring segment shouldn't return a
		 * pointer to the current ring (although it may be a part of a
		 * different ring).
		 */
		mapped_ring = dma_to_stream_ring(stream_info, addr);
		if (mapped_ring != cur_ring) {
			/* One TRB before should also fail */
			addr = cur_ring->first_seg->dma - trb_size;
			mapped_ring = dma_to_stream_ring(stream_info, addr);
		}
		if (mapped_ring == cur_ring) {
			xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
					"mapped to valid stream ID %u; "
					"mapped ring = %p\n",
					(unsigned long long) addr,
					cur_stream, mapped_ring);
			return -EINVAL;
		}
	}
	return 0;
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by device
 * drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 *
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to.  We need to do this because the host controller won't tell
 * us which stream ring the TRB came from.  We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example, say I
 * have segments of size 1KB, that are always 64-byte aligned.  A segment may
 * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
 * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
 * pass the radix tree a key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key pair.  On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
 */
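/*
 * Minimal sketch of the key computation described above (illustrative
 * helper, not part of the original driver): every TRB in a segment maps
 * to the same key, so one radix tree entry covers the whole segment.
 */
static inline unsigned long xhci_example_trb_key(dma_addr_t trb_dma)
{
	return (unsigned long)(trb_dma >> TRB_SEGMENT_SHIFT);
}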
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	unsigned long key;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u "
			"stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		key = (unsigned long)
			(cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
		ret = radix_tree_insert(&stream_info->trb_address_map,
				key, cur_ring);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero.  This will cause the xHC to give us an
	 * error if the device asks for a stream ID we don't have setup (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Do a little test on the radix tree to make sure it returns the
	 * correct values.
	 */
	if (xhci_test_radix_tree(xhci, num_streams, stream_info))
		goto cleanup_rings;
#endif

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> TRB_SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
}
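/*
 * Worked example for the MaxPStreams encoding above (illustrative note,
 * not from the original source): for num_stream_ctxs = 256,
 *
 *	fls(256) = 9, so max_primary_streams = 9 - 2 = 7
 *	and the xHC decodes 2^(7 + 1) = 256 array entries.
 *
 * The "- 2" works because num_stream_ctxs is always a power of two.
 */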
/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
}
/* Frees all stream contexts associated with the endpoint,
 * including the stream rings.
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;
	dma_addr_t addr;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> TRB_SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}
/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}
static void xhci_free_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}
int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info	*tt_info;
	unsigned int		num_ports;
	int			i, j;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}
/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u "
					"not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);
	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_ring	*ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}
/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.  xhci->port_array provides an array of the port speed for
 * each offset into the port status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Call xhci_find_raw_port_number() to get real index.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed == USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return	xhci_find_raw_port_number(hcd, top_dev->portnum);
}
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_slot_ctx    *slot_ctx;
	u32			port_num;
	u32			max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * decendent of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}
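/*
 * Worked example (illustrative note, not from the original source): a
 * full speed isoc endpoint with bInterval = 4 yields
 *
 *	interval = clamp_val(4, 1, 16) - 1 = 3	(2^3 = 8 frames)
 *	interval += 3				(1 frame = 8 uframes)
 *
 * so the endpoint context gets 2^6 * 125us = 8ms, matching the 8-frame
 * period the descriptor asked for.
 */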
/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 desc_interval);

	return interval;
}
static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;

	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}
static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}
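/*
 * Worked example of the rounding above (illustrative note, not from the
 * original source): a full speed interrupt endpoint with bInterval = 9
 * frames becomes 9 * 8 = 72 microframes, and
 *
 *	fls(72) - 1 = 6, i.e. 2^6 = 64 microframes (8ms)
 *
 * so the interval is rounded *down* to the nearest power of two and the
 * dev_warn() in xhci_microframes_to_exponent() fires.
 */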
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}
/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}
static u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		type = 0;
	}
	return type;
}
/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and the mult size.
 */
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}
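/*
 * Worked example (illustrative note, not from the original source): a
 * high speed isoc endpoint whose wMaxPacketSize word is 0x1400 decodes to
 *
 *	max_packet = 1024	(bits 10:0)
 *	max_burst  = 2		(bits 12:11, "additional opportunities")
 *
 * giving max_packet * (max_burst + 1) = 1024 * 3 = 3072 bytes per ESIT,
 * the most USB 2.0 allows in one microframe.
 */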
/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	enum xhci_ring_type type;
	u32 max_esit_payload;
	u32 endpoint_type;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(udev, ep);
	if (!endpoint_type)
		return -EINVAL;
	ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);

	type = usb_endpoint_type(&ep->desc);
	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		virt_dev->num_rings_cached--;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
					1, type);
	}
	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);

	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
				      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * CErr shall be set to 0 for Isoch endpoints.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
	else
		ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));

	/* Set the max packet size and max burst */
	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = 0;
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* dig out max burst from ep companion desc */
		max_burst = ep->ss_ep_comp.bMaxBurst;
		break;
	case USB_SPEED_HIGH:
		/* Some devices get this wrong */
		if (usb_endpoint_xfer_bulk(&ep->desc))
			max_packet = 512;
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (usb_endpoint_maxp(&ep->desc)
				     & 0x1800) >> 11;
		}
		break;
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		break;
	default:
		BUG();
	}
	ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
			MAX_BURST(max_burst));
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 *
	 * xHCI 1.0 specification indicates that the Average TRB Length should
	 * be set to 8 for control endpoints.
	 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
	else
		ep_ctx->tx_info |=
			cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));

	/* FIXME Debug endpoint context */
	return 0;
}
void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}
void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}
void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; ++i) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
				     num_sp * sizeof(u64),
				     &xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);

	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
						flags);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}
static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(&pdev->dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
						 mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}
void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
	if (urb_priv) {
		kfree(urb_priv->td[0]);
		kfree(urb_priv);
	}
}
void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	struct dev_info	*dev_info, *next;
	struct xhci_cd  *cur_cd, *next_cd;
	unsigned long	flags;
	int size;
	int i, j, num_ports;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		dma_free_coherent(&pdev->dev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->cmd_ring_reserved_trbs = 0;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");
	list_for_each_entry_safe(cur_cd, next_cd,
			&xhci->cancel_cmd_list, cancel_cmd_list) {
		list_del(&cur_cd->cancel_cmd_list);
		kfree(cur_cd);
	}

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	if (xhci->small_streams_pool)
		dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg(xhci, "Freed small stream array pool\n");

	if (xhci->medium_streams_pool)
		dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg(xhci, "Freed medium stream array pool\n");

	if (xhci->dcbaa)
		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
		list_del(&dev_info->list);
		kfree(dev_info);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->rh_bw)
		goto no_bw;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;
			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;
		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->num_usb2_ports = 0;
	xhci->num_usb3_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_ports);
	kfree(xhci->usb3_ports);
	kfree(xhci->port_array);
	kfree(xhci->rh_bw);
	kfree(xhci->ext_caps);

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->bus_state[0].bus_suspended = 0;
	xhci->bus_state[1].bus_suspended = 0;
}
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
	struct {
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
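	/*
	 * RW1C means "write 1 to clear": writing a 1 to EHB would acknowledge
	 * (clear) it, while writing 0 leaves it untouched.  Masking EHB out of
	 * the value written back below is what actually preserves its state.
	 */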
	temp &= ~ERST_EHB;
	xhci_dbg(xhci, "// Write event ring dequeue pointer, "
			"preserving EHB bit\n");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, u8 major_revision, int max_caps)
{
	u32 temp, port_offset, port_count;
	int i;

	if (major_revision > 0x03) {
		xhci_warn(xhci, "Ignoring unknown port speed, "
				"Ext Cap %p, revision = 0x%x\n",
				addr, major_revision);
		/* Ignoring port protocol we can't understand. FIXME */
		return;
	}

	/* Port offset and count in the third dword, see section 7.2 */
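	/*
	 * In that dword, the Compatible Port Offset lives in bits 7:0 and the
	 * Compatible Port Count in bits 15:8, which is what the two macros
	 * below extract.
	 */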
	temp = xhci_readl(xhci, addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
			"count = %u, revision = 0x%x\n",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are ‘1’ to MaxPorts" */
		return;

	/* cache usb2 port capabilities */
	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
		xhci->ext_caps[xhci->num_ext_caps++] = temp;

	/* Check the host's USB2 LPM capability */
	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
			(temp & XHCI_L1C)) {
		xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
		xhci->sw_lpm_support = 1;
	}

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
		xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
		xhci->sw_lpm_support = 1;
		if (temp & XHCI_HLC) {
			xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
			xhci->hw_lpm_support = 1;
		}
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (xhci->port_array[i] != 0) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
					" port %u\n", addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, "
					"duplicated as USB %u\n",
					xhci->port_array[i], major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (xhci->port_array[i] != major_revision &&
					xhci->port_array[i] != DUPLICATE_ENTRY) {
				if (xhci->port_array[i] == 0x03)
					xhci->num_usb3_ports--;
				else
					xhci->num_usb2_ports--;
				xhci->port_array[i] = DUPLICATE_ENTRY;
			}
			/* FIXME: Should we disable the port? */
			continue;
		}
		xhci->port_array[i] = major_revision;
		if (major_revision == 0x03)
			xhci->num_usb3_ports++;
		else
			xhci->num_usb2_ports++;
	}
	/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
 * specify what speeds each port is supposed to be.  We can't count on the port
 * speed bits in the PORTSC register being correct until a device is connected,
 * but we need to set up the two fake roothubs with the correct number of USB
 * 3.0 and USB 2.0 ports at host controller initialization time.
 */
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	__le32 __iomem *addr, *tmp_addr;
	u32 offset, tmp_offset;
	unsigned int num_ports;
	int i, j, port_index;
	int cap_count = 0;

	addr = &xhci->cap_regs->hcc_params;
	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
	if (offset == 0) {
		xhci_err(xhci, "No Extended Capability registers, "
				"unable to set up roothub.\n");
		return -ENODEV;
	}

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
	if (!xhci->port_array)
		return -ENOMEM;

	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}

	/*
	 * For whatever reason, the first capability offset is from the
	 * capability register base, not from the HCCPARAMS register.
	 * See section 5.3.6 for offset calculation.
	 */
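	/*
	 * Note the offset is in 32-bit words; since addr is an __le32
	 * pointer, "+ offset" below advances offset * 4 bytes from the
	 * capability register base.
	 */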
	addr = &xhci->cap_regs->hc_capbase + offset;

	tmp_addr = addr;
	tmp_offset = offset;

	/* count extended protocol capability entries for later caching */
	do {
		u32 cap_id;
		cap_id = xhci_readl(xhci, tmp_addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			cap_count++;
		tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
		tmp_addr += tmp_offset;
	} while (tmp_offset);
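	/*
	 * Each capability header encodes its ID in the low byte and the word
	 * offset of the next capability in the second byte; a zero "next"
	 * field terminates the chain, which is why the walk above stops when
	 * tmp_offset reaches 0.
	 */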
	xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
	if (!xhci->ext_caps)
		return -ENOMEM;

	while (1) {
		u32 cap_id;

		cap_id = xhci_readl(xhci, addr);
		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
			xhci_add_in_port(xhci, num_ports, addr,
					(u8) XHCI_EXT_PORT_MAJOR(cap_id),
					cap_count);
		offset = XHCI_EXT_CAPS_NEXT(cap_id);
		if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
				== num_ports)
			break;
		/*
		 * Once you're into the Extended Capabilities, the offset is
		 * always relative to the register holding the offset.
		 */
		addr += offset;
	}

	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
			xhci->num_usb2_ports, xhci->num_usb3_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->num_usb3_ports > 15) {
		xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
		xhci->num_usb3_ports = 15;
	}
	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
		xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
				USB_MAXCHILDREN);
		xhci->num_usb2_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
				xhci->num_usb2_ports, flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
					xhci->port_array[i] == 0 ||
					xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg(xhci, "USB 2.0 port at index %u, "
					"addr = %p\n", i,
					xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
				xhci->num_usb3_ports, flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg(xhci, "USB 3.0 port at index %u, "
						"addr = %p\n", i,
						xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int val, val2;
	u64 val_64;
	struct xhci_segment *seg;
	u32 page_size, temp;
	int i;

	INIT_LIST_HEAD(&xhci->lpm_failed_devs);
	INIT_LIST_HEAD(&xhci->cancel_cmd_list);

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
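	/*
	 * Example of the PAGESIZE encoding above: bit n set means the HC
	 * supports a 2^(n+12)-byte page, so a register value of 0x1 means
	 * 4K pages (the minimum), and 0x8 would mean 32K pages.
	 */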
	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			GFP_KERNEL);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, 64, xhci->page_size);
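	/*
	 * dma_pool_create()'s last two arguments are the block alignment and
	 * the boundary that allocations may not cross; passing xhci->page_size
	 * as the boundary keeps each ring segment within a single page.
	 */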
	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */

	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
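	/*
	 * The low bits of CRCR are flag bits (bit 0 is the Ring Cycle State
	 * the HC starts consuming with), which is why the ring's DMA address
	 * must be 64-byte aligned and is masked with ~CMD_RING_RSVD_BITS
	 * before the cycle state is OR'd in.
	 */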
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
	if (!xhci->lpm_command)
		goto fail;

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
						flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;

	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			GFP_KERNEL);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
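	/*
	 * Each ERST entry is thus a (segment base address, size in TRBs)
	 * pair: the HC walks these entries to locate the event ring's
	 * segments, so the table mirrors the segment list built above.
	 */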
	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; ++i) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	xhci_writel(xhci, temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}