/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/dmapool.h>

#include "xhci.h"
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring || !ring->first_seg)
		return;
	first_seg = ring->first_seg;
	seg = first_seg->next;
	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first_seg);
	ring->first_seg = NULL;
	kfree(ring);
}
/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring	*ring;
	struct xhci_segment	*prev;

	ring = kzalloc(sizeof *(ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	INIT_LIST_HEAD(&ring->cancelled_td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment	*next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;

	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}
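
/*
 * Note on the cycle-bit protocol used above (ring management, xHCI section
 * 4.9.2): the producer writes each TRB with its Producer Cycle State
 * (initially 1), so on a freshly zeroed ring the consumer, whose Consumer
 * Cycle State is also 1, stops at the first TRB whose cycle bit does not
 * match.  When either side passes a Link TRB with Toggle Cycle set, it flips
 * its cycle state, so on the next pass around the ring newly "owned" TRBs
 * carry a 0 cycle bit and stale TRBs from the previous pass are not consumed.
 */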
/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;

	for (i = 0; i < 31; ++i)
		if (dev->ep_rings[i])
			xhci_ring_free(xhci, dev->ep_rings[i]);

	if (dev->in_ctx)
		dma_pool_free(xhci->device_pool,
				dev->in_ctx, dev->in_ctx_dma);
	if (dev->out_ctx)
		dma_pool_free(xhci->device_pool,
				dev->out_ctx, dev->out_ctx_dma);
	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	dma_addr_t	dma;
	struct xhci_virt_device *dev;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC */
	dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
	if (!dev->out_ctx)
		goto fail;
	dev->out_ctx_dma = dma;
	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dma);
	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
	if (!dev->in_ctx)
		goto fail;
	dev->in_ctx_dma = dma;
	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dma);
	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));

	/* Allocate endpoint 0 ring */
	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
	if (!dev->ep_rings[0])
		goto fail;

	init_completion(&dev->cmd_completion);

	/*
	 * Point to output device context in dcbaa; skip the output control
	 * context, which is eight 32 bit fields (or 32 bytes long)
	 */
	xhci->dcbaa->dev_context_ptrs[slot_id] =
		(u32) dev->out_ctx_dma + (32);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			(unsigned long long)dev->out_ctx_dma);

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}
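
/*
 * Arithmetic behind the "+ (32)" above: the output device context begins with
 * a control context of eight 32-bit fields, i.e. 8 * 4 = 32 bytes, so
 * out_ctx_dma + 32 is the DMA address of the slot context that the DCBAA
 * entry must reference.
 */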
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct usb_device	*top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = &dev->in_ctx->ep[0];

	/* 2) New slot context and endpoint 0 context are valid */
	dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

	/* 3) Only the control endpoint is valid - one endpoint context */
	dev->in_ctx->slot.dev_info |= LAST_CTX(1);

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		dev->in_ctx->slot.dev_info |= (u32) udev->route;
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
		break;
	case USB_SPEED_HIGH:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
		break;
	case USB_SPEED_FULL:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
		break;
	case USB_SPEED_LOW:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
		break;
	case USB_SPEED_VARIABLE:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		break;
	}
	/* Find the root hub port this device is under */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

	/* Is this a LS/FS device under a HS hub? */
	/* FIXME: I don't think this is right, where does the TT info for the
	 * roothub or parent hub come from?
	 */
	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
		dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
		dev->in_ctx->slot.tt_info |= udev->ttport << 8;
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
	/*
	 * See section 4.3 bullet 6:
	 * The default Max Packet size for ep0 is "8 bytes for a USB2
	 * LS/FS/HS device or 512 bytes for a USB3 SS device"
	 * XXX: Not sure about wireless USB devices.
	 */
	if (udev->speed == USB_SPEED_SUPER)
		ep0_ctx->ep_info2 |= MAX_PACKET(512);
	else
		ep0_ctx->ep_info2 |= MAX_PACKET(8);
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= MAX_BURST(0);
	ep0_ctx->ep_info2 |= ERROR_COUNT(3);

	ep0_ctx->deq = dev->ep_rings[0]->first_seg->dma;
	ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
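
/*
 * Note on the dequeue pointer written above: bit 0 of the endpoint context's
 * TR Dequeue Pointer field is the Dequeue Cycle State (DCS), which is why the
 * ring's cycle_state (1 for a fresh ring) is OR'ed into the segment's DMA
 * address; the segment pool's 64-byte alignment guarantees the low address
 * bits are otherwise zero.
 */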
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
				usb_endpoint_xfer_bulk(&ep->desc))
			interval = ep->desc.bInterval;
		/* Fall through - SS and HS isoc/int have same decoding */
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			if (ep->desc.bInterval == 0)
				interval = 0;
			else
				interval = ep->desc.bInterval - 1;
			/* xHCI only allows Interval values 0..15 */
			if (interval > 15)
				interval = 15;
			if (interval != ep->desc.bInterval - 1)
				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress, 1 << interval);
		}
		break;
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		/* Convert bInterval (in 1-255 frames) to microframes and round down to
		 * nearest power of 2.
		 */
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = fls(8*ep->desc.bInterval) - 1;
			if ((1 << interval) != 8*ep->desc.bInterval)
				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress, 1 << interval);
		}
		break;
	default:
		break;
	}
	return EP_INTERVAL(interval);
}
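
/*
 * Worked examples for the conversions above (hypothetical endpoint
 * descriptors): a high-speed interrupt endpoint with bInterval = 4 gets
 * Interval = 3, i.e. a service period of 2^3 * 125us = 1ms; a full-speed
 * interrupt endpoint with bInterval = 9 frames is 72 microframes, and
 * fls(72) - 1 = 6 gives Interval = 6, i.e. 2^6 * 125us = 8ms, rounded down
 * from the requested 9ms (which is when the dev_warn above fires).
 */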
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &virt_dev->in_ctx->ep[ep_index];

	/* Set up the endpoint ring */
	virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
	if (!virt_dev->new_ep_rings[ep_index])
		return -ENOMEM;
	ep_ring = virt_dev->new_ep_rings[ep_index];
	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * error count = 0 means infinite retries.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = ERROR_COUNT(3);
	else
		ep_ctx->ep_info2 = ERROR_COUNT(1);

	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = ep->desc.wMaxPacketSize;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		/* dig out max burst from ep companion desc */
		max_packet = ep->ss_ep_comp->desc.bMaxBurst;
		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
		break;
	case USB_SPEED_HIGH:
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		max_packet = ep->desc.wMaxPacketSize & 0x3ff;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		break;
	default:
		break;
	}
	/* FIXME Debug endpoint context */
	return 0;
}
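
/*
 * Worked example for the high-speed Max Burst decode above (hypothetical
 * descriptor values): a periodic endpoint with wMaxPacketSize = 0x0a00 has
 * bits 12:11 = 01b, i.e. one additional transaction opportunity per
 * microframe (two total), so Max Burst Size is written as 1 and Max Packet
 * Size as 0x0a00 & 0x3ff = 512 bytes.
 */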
void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &virt_dev->in_ctx->ep[ep_index];

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;

	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds or fails.
	 */
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	xhci->page_shift = 0;
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int	val, val2;
	u64		val_64;
	struct xhci_segment	*seg;
	u32		page_size;
	int		i;

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
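
	/*
	 * Example reading of the loop above: a PAGESIZE register value of 0x1
	 * has only bit 0 set, so the lowest supported page size is
	 * 1 << (0 + 12) = 4K, which is also what the driver settles on.
	 */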
	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);
	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(*xhci->dcbaa), &dma);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);
	/* See Table 46 and Note on Figure 55 */
	/* FIXME support 64-byte contexts */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			sizeof(struct xhci_device_control),
			64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;
	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long)val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
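
	/*
	 * Note on the read-modify-write above: the command ring segment comes
	 * from a 64-byte aligned pool, so its low address bits are zero; the
	 * CRCR's reserved/flag bits (CMD_RING_RSVD_BITS) are preserved from
	 * the register read, and the ring's cycle_state seeds the Ring Cycle
	 * State in bit 0.
	 */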
	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;
	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;

	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);
	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = seg->dma;
		entry->seg_size = TRBS_PER_SEGMENT;
		seg = seg->next;
	}
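
	/*
	 * The event ring was allocated with link_trbs = false: the HC walks
	 * event ring segments via these ERST entries (segment base address
	 * plus size in TRBs) rather than via Link TRBs, which only the
	 * command and transfer rings use.
	 */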
	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);
	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}