/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					       unsigned int cycle_state, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
				struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
				(type == TYPE_ISOC &&
				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}
/*
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to.  We need to do this because the host controller won't tell
 * us which stream ring the TRB came from.  We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example, say I
 * have segments of size 1KB, that are always 1KB aligned.  A segment may
 * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
 * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
 * pass the radix tree a key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key pair.  On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
 */
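/*
 * Illustrative sketch (not part of the driver): recovering the ring that owns
 * a TRB is a single radix tree lookup on the upper bits of its DMA address,
 * which is what xhci_dma_to_transfer_ring() further down in this file does.
 * The helper name here is hypothetical.
 */
static inline struct xhci_ring *xhci_example_trb_to_ring(
		struct radix_tree_root *trb_address_map, dma_addr_t trb_dma)
{
	/* TRB_SEGMENT_SHIFT strips the within-segment offset bits */
	return radix_tree_lookup(trb_address_map,
			(unsigned long)(trb_dma >> TRB_SEGMENT_SHIFT));
}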
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *seg,
		gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	/* Skip any segments that were already added. */
	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;
	ret = radix_tree_insert(trb_address_map,
			key, ring);
	radix_tree_preload_end();
	return ret;
}

static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
		struct xhci_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int xhci_update_stream_segment_mapping(
		struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *first_seg,
		struct xhci_segment *last_seg,
		gfp_t mem_flags)
{
	struct xhci_segment *seg;
	struct xhci_segment *failed_seg;
	int ret;

	if (WARN_ON_ONCE(trb_address_map == NULL))
		return 0;

	seg = first_seg;
	do {
		ret = xhci_insert_segment_mapping(trb_address_map,
				ring, seg, mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		xhci_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
		return;

	seg = ring->first_seg;
	do {
		xhci_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, mem_flags);
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			xhci_remove_stream_mapping(ring);
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	}

	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring,
					unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purpose
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment	*next;

		next = xhci_segment_alloc(xhci, cycle_state, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	return ring;

fail:
	kfree(ring);
	return NULL;
}
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}

/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
			struct xhci_ring *ring, unsigned int cycle_state,
			enum xhci_ring_type type)
{
	struct xhci_segment *seg = ring->first_seg;
	int i;

	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		if (cycle_state == 0) {
			for (i = 0; i < TRBS_PER_SEGMENT; i++)
				seg->trbs[i].link.control |=
					cpu_to_le32(TRB_CYCLE);
		}
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, type);
		seg = seg->next;
	} while (seg != ring->first_seg);
	ring->type = type;
	xhci_initialize_ring_info(ring, cycle_state);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}

/*
 * Expand an existing ring.
 * Look for a cached ring or allocate a new ring which has same segment numbers
 * and link the two rings.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
				unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment	*first;
	struct xhci_segment	*last;
	unsigned int		num_segs;
	unsigned int		num_segs_needed;
	int			ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);
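	/*
	 * Worked example (illustrative): with TRBS_PER_SEGMENT == 64, each
	 * segment holds 63 usable TRBs (one slot is the link TRB), so a
	 * request for 70 TRBs needs (70 + 62) / 63 = 2 segments.
	 */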
	/* Allocate number of segments we needed, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
						ring, first, last, flags);
	if (ret) {
		struct xhci_segment *next;

		do {
			next = first->next;
			xhci_segment_free(xhci, first);
			if (first == last)
				break;
			first = next;
		} while (true);
		return ret;
	}

	xhci_link_rings(xhci, ring, first, last, num_segs);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeeded, now has %d segments",
			ring->num_segs);

	return 0;
}
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
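/*
 * Illustration: with 64-byte contexts, a device context (1 slot context plus
 * 31 endpoint contexts) is 32 * 64 = 2048 bytes; with 32-byte contexts it is
 * 1024 bytes.  That is where the 2048/1024 sizing in
 * xhci_alloc_container_ctx() below comes from; an input context carries one
 * extra input control context on top of that.
 */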
static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

static void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(
					      struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}


/***************** Streams structures manipulation *************************/
static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size,
				stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		return dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
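/*
 * Illustration: each struct xhci_stream_ctx is 16 bytes, so the 64-byte
 * minimum corresponds to a 4-entry array and the 1MB maximum to a 64K-entry
 * array.  xhci_alloc_stream_ctx() below picks the small pool, the medium
 * pool, or dma_alloc_coherent() depending on where the total size falls.
 */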
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(dev, size,
				dma, mem_flags);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}

/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 */
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u "
			"stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero.  This will cause the xHC to give us an
	 * error if the device asks for a stream ID we haven't set up (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
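	/*
	 * Worked example (illustrative): a 256-entry stream context array
	 * gives fls(256) - 2 = 7, and 2^(7 + 1) = 256 entries, as the debug
	 * message below reports.
	 */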
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

/* Frees all stream contexts associated with the endpoint,
 * including the stream rings.
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}


/***************** Device context manipulation *************************/
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	setup_timer(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
		    (unsigned long)ep);
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info	*tt_info;
	unsigned int		num_ports;
	int			i, j;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}

/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u "
					"not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
		 slot_id,
		 &xhci->dcbaa->dev_context_ptrs[slot_id],
		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}
*xhci
,
1035 struct usb_device
*udev
)
1037 struct xhci_virt_device
*virt_dev
;
1038 struct xhci_ep_ctx
*ep0_ctx
;
1039 struct xhci_ring
*ep_ring
;
1041 virt_dev
= xhci
->devs
[udev
->slot_id
];
1042 ep0_ctx
= xhci_get_ep_ctx(xhci
, virt_dev
->in_ctx
, 0);
1043 ep_ring
= virt_dev
->eps
[0].ring
;
1045 * FIXME we don't keep track of the dequeue pointer very well after a
1046 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
1047 * host to our enqueue pointer. This should only be called after a
1048 * configured device has reset, so all control transfers should have
1049 * been completed or cancelled before the reset.
1051 ep0_ctx
->deq
= cpu_to_le64(xhci_trb_virt_to_dma(ep_ring
->enq_seg
,
1053 | ep_ring
->cycle_state
);
1057 * The xHCI roothub may have ports of differing speeds in any order in the port
1058 * status registers. xhci->port_array provides an array of the port speed for
1059 * each offset into the port status registers.
1061 * The xHCI hardware wants to know the roothub port number that the USB device
1062 * is attached to (or the roothub port its ancestor hub is attached to). All we
1063 * know is the index of that port under either the USB 2.0 or the USB 3.0
1064 * roothub, but that doesn't give us the real index into the HW port status
1065 * registers. Call xhci_find_raw_port_number() to get real index.
1067 static u32
xhci_find_real_port_number(struct xhci_hcd
*xhci
,
1068 struct usb_device
*udev
)
1070 struct usb_device
*top_dev
;
1071 struct usb_hcd
*hcd
;
1073 if (udev
->speed
== USB_SPEED_SUPER
)
1074 hcd
= xhci
->shared_hcd
;
1076 hcd
= xhci
->main_hcd
;
1078 for (top_dev
= udev
; top_dev
->parent
&& top_dev
->parent
->parent
;
1079 top_dev
= top_dev
->parent
)
1080 /* Found device below root hub */;
1082 return xhci_find_raw_port_number(hcd
, top_dev
->portnum
);
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_slot_ctx    *slot_ctx;
	u32			port_num;
	u32			max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(dev->udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}
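/*
 * Worked example (illustrative) for xhci_parse_exponent_interval(): a full
 * speed isoc endpoint with bInterval = 4 yields interval = 3 (2^3 = 8
 * frames); the "+= 3" rescaling turns that into 2^6 = 64 microframes, the
 * same 8 ms period.
 */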
/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
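	/*
	 * Worked example (illustrative): desc_interval = 9 microframes gives
	 * fls(9) - 1 = 3, i.e. 2^3 = 8 microframes, rounded down to the
	 * nearest power of 2 as described above.
	 */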
	if ((1 << interval) != desc_interval)
		dev_dbg(&udev->dev,
			"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			ep->desc.bEndpointAddress,
			1 << interval,
			desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}

static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
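/* Example (illustrative): Interval = 3 means the endpoint is serviced every
 * 2^3 * 125us = 1 ms.
 */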
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}

/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
	    !usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		type = 0;
	}
	return type;
}
/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and interval.
 */
static u32 xhci_get_max_esit_payload(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}
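/*
 * Worked example (illustrative) for xhci_get_max_esit_payload(): a high
 * speed isoc endpoint with wMaxPacketSize = 0x1400 decodes to
 * max_packet = 1024 and max_burst = 2 (bits 12:11), so the max ESIT payload
 * is 1024 * 3 = 3072 bytes.
 */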
/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	enum xhci_ring_type type;
	u32 max_esit_payload;
	u32 endpoint_type;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(ep);
	if (!endpoint_type)
		return -EINVAL;
	ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);

	type = usb_endpoint_type(&ep->desc);
	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->num_rings_cached--;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
					1, type);
	}
	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);

	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
				      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * CErr shall be set to 0 for Isoch endpoints.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
	else
		ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));

	/* Set the max packet size and max burst */
	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = 0;
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* dig out max burst from ep companion desc */
		max_burst = ep->ss_ep_comp.bMaxBurst;
		break;
	case USB_SPEED_HIGH:
		/* Some devices get this wrong */
		if (usb_endpoint_xfer_bulk(&ep->desc))
			max_packet = 512;
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
		    usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (usb_endpoint_maxp(&ep->desc)
				     & 0x1800) >> 11;
		}
		break;
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		break;
	default:
		BUG();
	}
	ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
			MAX_BURST(max_burst));
	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 *
	 * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
	 * should be set to 8 for control endpoints.
	 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
	else
		ep_ctx->tx_info |=
			cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));

	/* FIXME Debug endpoint context */
	return 0;
}
void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; ++i) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
				     num_sp * sizeof(u64),
				     &xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);

	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
				flags);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}
static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
	if (urb_priv) {
		kfree(urb_priv->td[0]);
		kfree(urb_priv);
	}
}

void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	int size;
	int i, j, num_ports;

	del_timer_sync(&xhci->cmd_timer);

	/* Free the Event Ring Segment Table and the actual Event Ring */
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		dma_free_coherent(dev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->lpm_command = NULL;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
	xhci_cleanup_command_queue(xhci);

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;
			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

	dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

	dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed small stream array pool");

	dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed medium stream array pool");

	if (xhci->dcbaa)
		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	if (!xhci->rh_bw)
		goto no_bw;

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;
		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->cmd_ring_reserved_trbs = 0;
	xhci->num_usb2_ports = 0;
	xhci->num_usb3_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_ports);
	kfree(xhci->usb3_ports);
	kfree(xhci->port_array);
	kfree(xhci->rh_bw);
	kfree(xhci->ext_caps);

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->bus_state[0].bus_suspended = 0;
	xhci->bus_state[1].bus_suspended = 0;
}
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
			  true);
		return -1;
	}
	return 0;
}
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
{
	struct {
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma +
				(TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}
	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write event ring dequeue pointer, "
			"preserving EHB bit");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}
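
/*
 * Worked example (illustrative, assuming ERST_PTR_MASK covers the low four
 * bits and ERST_EHB is bit 3, per their definitions in xhci.h): if the new
 * dequeue pointer is 0x1000 and erst_dequeue reads back 0xb (EHB set,
 * DESI == 3), then temp becomes 0xb & 0xf = 0xb, masking off EHB leaves
 * 0x3, and the value written is 0x1000 | 0x3 = 0x1003.  EHB is written as
 * 0, so the RW1C bit is left uncleared for the event handler.
 */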
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, int max_caps)
{
	u32 temp, port_offset, port_count;
	int i;
	u8 major_revision;
	struct xhci_hub *rhub;

	temp = readl(addr);
	major_revision = XHCI_EXT_PORT_MAJOR(temp);

	if (major_revision == 0x03) {
		rhub = &xhci->usb3_rhub;
	} else if (major_revision <= 0x02) {
		rhub = &xhci->usb2_rhub;
	} else {
		xhci_warn(xhci, "Ignoring unknown port speed, "
				"Ext Cap %p, revision = 0x%x\n",
				addr, major_revision);
		/* Ignoring port protocol we can't understand. FIXME */
		return;
	}
	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
	rhub->min_rev = XHCI_EXT_PORT_MINOR(temp);

	/* Port offset and count in the third dword, see section 7.2 */
	temp = readl(addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Ext Cap %p, port offset = %u, "
			"count = %u, revision = 0x%x",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are ‘1’ to MaxPorts" */
		return;

	rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
	if (rhub->psi_count) {
		rhub->psi = kcalloc(rhub->psi_count, sizeof(*rhub->psi),
				GFP_KERNEL);
		if (!rhub->psi)
			rhub->psi_count = 0;

		rhub->psi_uid_count++;
		for (i = 0; i < rhub->psi_count; i++) {
			rhub->psi[i] = readl(addr + 4 + i);

			/* count unique ID values, two consecutive entries can
			 * have the same ID if link is asymmetric
			 */
			if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
				  XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
				rhub->psi_uid_count++;

			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
				XHCI_EXT_PORT_PSIV(rhub->psi[i]),
				XHCI_EXT_PORT_PSIE(rhub->psi[i]),
				XHCI_EXT_PORT_PLT(rhub->psi[i]),
				XHCI_EXT_PORT_PFD(rhub->psi[i]),
				XHCI_EXT_PORT_LP(rhub->psi[i]),
				XHCI_EXT_PORT_PSIM(rhub->psi[i]));
		}
	}
	/* cache usb2 port capabilities */
	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
		xhci->ext_caps[xhci->num_ext_caps++] = temp;

	/* Check the host's USB2 LPM capability */
	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
			(temp & XHCI_L1C)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 0.96: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
	}

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 1.0: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
		if (temp & XHCI_HLC) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"xHCI 1.0: support USB2 hardware lpm");
			xhci->hw_lpm_support = 1;
		}
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (xhci->port_array[i] != 0) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
					" port %u\n", addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, "
					"duplicated as USB %u\n",
					xhci->port_array[i], major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (xhci->port_array[i] != major_revision &&
					xhci->port_array[i] != DUPLICATE_ENTRY) {
				if (xhci->port_array[i] == 0x03)
					xhci->num_usb3_ports--;
				else
					xhci->num_usb2_ports--;
				xhci->port_array[i] = DUPLICATE_ENTRY;
			}
			/* FIXME: Should we disable the port? */
			continue;
		}
		xhci->port_array[i] = major_revision;
		if (major_revision == 0x03)
			xhci->num_usb3_ports++;
		else
			xhci->num_usb2_ports++;
	}
	/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
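
/*
 * Illustrative sketch only, not used by the driver: decoding the first
 * dword of a Supported Protocol Capability with the same macros used
 * above.  A typical USB 3.0 capability header reads 0x03000002: major
 * revision 0x03, minor revision 0x00, capability ID 0x02 (Supported
 * Protocol).
 */
static inline void xhci_example_decode_protocol_cap(u32 first_dword)
{
	u8 major = XHCI_EXT_PORT_MAJOR(first_dword);	/* bits 31:24 */
	u8 minor = XHCI_EXT_PORT_MINOR(first_dword);	/* bits 23:16 */

	(void)major;	/* 0x03 for the example dword above */
	(void)minor;	/* 0x00 for the example dword above */
}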
/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
 * specify what speeds each port is supposed to be.  We can't count on the port
 * speed bits in the PORTSC register being correct until a device is connected,
 * but we need to set up the two fake roothubs with the correct number of USB
 * 3.0 and USB 2.0 ports at host controller initialization time.
 */
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	void __iomem *base;
	u32 offset;
	unsigned int num_ports;
	int i, j, port_index;
	int cap_count = 0;
	u32 cap_start;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
	if (!xhci->port_array)
		return -ENOMEM;

	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	base = &xhci->cap_regs->hc_capbase;

	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
	if (!cap_start) {
		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
		return -ENODEV;
	}

	offset = cap_start;
	/* count extended protocol capability entries for later caching */
	while (offset) {
		cap_count++;
		offset = xhci_find_next_ext_cap(base, offset,
				XHCI_EXT_CAPS_PROTOCOL);
	}

	xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
	if (!xhci->ext_caps)
		return -ENOMEM;

	offset = cap_start;
	while (offset) {
		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
		if (xhci->num_usb2_ports + xhci->num_usb3_ports == num_ports)
			break;
		offset = xhci_find_next_ext_cap(base, offset,
				XHCI_EXT_CAPS_PROTOCOL);
	}

	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
			xhci->num_usb2_ports, xhci->num_usb3_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->num_usb3_ports > 15) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 3.0 roothub ports to 15.");
		xhci->num_usb3_ports = 15;
	}
	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 2.0 roothub ports to %u.",
				USB_MAXCHILDREN);
		xhci->num_usb2_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
				xhci->num_usb2_ports, flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
					xhci->port_array[i] == 0 ||
					xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"USB 2.0 port at index %u, "
					"addr = %p", i,
					xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
				xhci->num_usb3_ports, flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
						"USB 3.0 port at index %u, "
						"addr = %p", i,
						xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}
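
/*
 * Illustrative sketch only, not used by the driver: each port owns
 * NUM_PORT_REGS 32-bit operational registers (PORTSC, PORTPMSC, PORTLI
 * and, on 1.0+ hosts, PORTHLPMC), so the loops above locate PORTSC for
 * 0-based port i by plain pointer arithmetic from port_status_base.
 */
static inline __le32 __iomem *xhci_example_portsc_addr(struct xhci_hcd *xhci,
		unsigned int port_index)
{
	return &xhci->op_regs->port_status_base +
		NUM_PORT_REGS * port_index;
}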
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int	val, val2;
	u64		val_64;
	struct xhci_segment	*seg;
	u32 page_size, temp;
	int i;

	INIT_LIST_HEAD(&xhci->cmd_list);

	/* init command timeout timer */
	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
			(unsigned long)xhci);

	page_size = readl(&xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size register = 0x%x", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size of %iK", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"HCD page size set to %iK", xhci->page_size / 1024);
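
	/*
	 * Worked example (illustrative): a PAGE SIZE register value of 0x1
	 * has bit 0 set, so the loop above exits with i == 0 and the
	 * controller supports 1 << (0 + 12) = 4096-byte pages; a read of
	 * 0x3 would additionally advertise 8K pages via bit 1.
	 */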
	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// xHC can handle at most %d device slots.", val);
	val2 = readl(&xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting Max device slots reg = 0x%x.", val);
	writel(val, &xhci->op_regs->config_reg);
	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			GFP_KERNEL);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Device context base array address = 0x%llx (DMA), %p (virt)",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments
	 * and our use of dma addresses in the trb_address_map radix tree needs
	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;
	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */

	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;
	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%016llx",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
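
	/*
	 * Worked example (illustrative): with the first command ring segment
	 * at DMA address 0x10000 and a cycle state of 1, the value written
	 * above is 0x10000 | 0x1 = 0x10001 -- the 64-byte aligned ring
	 * pointer in the upper bits, the Ring Cycle State in bit 0, and any
	 * reserved low bits carried over from the previous register value.
	 */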
	xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
	if (!xhci->lpm_command)
		goto fail;

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = readl(&xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Doorbell array is located at offset 0x%x"
			" from cap regs base addr", val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];
	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
						flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci) < 0)
		goto fail;

	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			GFP_KERNEL);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Allocated event ring segment table at 0x%llx",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);
	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
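
	/*
	 * Illustrative note: each ERST entry is just { seg_addr, seg_size,
	 * rsvd }, so with ERST_NUM_SEGS currently defined as 1 the single
	 * entry above points the controller at one event ring segment of
	 * TRBS_PER_SEGMENT TRBs.
	 */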
	/* set ERST count with the number of entries in the segment table */
	val = readl(&xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
			val);
	writel(val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST base address for ir_set 0 = 0x%llx",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wrote ERST address to ir_set 0.");
	xhci_print_ir_set(xhci, 0);
	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; ++i) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
		/* Only the USB 2.0 completions will ever be used. */
		init_completion(&xhci->bus_state[1].rexit_done[i]);
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = readl(&xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	writel(temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}