Commit | Line | Data |
---|---|---|
66d4eadd SS |
1 | /* |
2 | * xHCI host controller driver | |
3 | * | |
4 | * Copyright (C) 2008 Intel Corp. | |
5 | * | |
6 | * Author: Sarah Sharp | |
7 | * Some code borrowed from the Linux EHCI driver. | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
15 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 | * for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software Foundation, | |
20 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
21 | */ | |
22 | ||
23 | #include <linux/usb.h> | |
0ebbab37 | 24 | #include <linux/pci.h> |
66d4eadd SS |
25 | |
26 | #include "xhci.h" | |
27 | ||
0ebbab37 SS |
28 | /* |
29 | * Allocates a generic ring segment from the ring pool, sets the dma address, | |
30 | * initializes the segment to zero, and sets the private next pointer to NULL. | |
31 | * | |
32 | * Section 4.11.1.1: | |
33 | * "All components of all Command and Transfer TRBs shall be initialized to '0'" | |
34 | */ | |
35 | static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags) | |
36 | { | |
37 | struct xhci_segment *seg; | |
38 | dma_addr_t dma; | |
39 | ||
40 | seg = kzalloc(sizeof *seg, flags); | |
41 | if (!seg) | |
42 | return 0; | |
43 | xhci_dbg(xhci, "Allocating priv segment structure at 0x%x\n", | |
44 | (unsigned int) seg); | |
45 | ||
46 | seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); | |
47 | if (!seg->trbs) { | |
48 | kfree(seg); | |
49 | return 0; | |
50 | } | |
51 | xhci_dbg(xhci, "// Allocating segment at 0x%x (virtual) 0x%x (DMA)\n", | |
52 | (unsigned int) seg->trbs, (u32) dma); | |
53 | ||
54 | memset(seg->trbs, 0, SEGMENT_SIZE); | |
55 | seg->dma = dma; | |
56 | seg->next = NULL; | |
57 | ||
58 | return seg; | |
59 | } | |
60 | ||
61 | static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) | |
62 | { | |
63 | if (!seg) | |
64 | return; | |
65 | if (seg->trbs) { | |
66 | xhci_dbg(xhci, "Freeing DMA segment at 0x%x" | |
67 | " (virtual) 0x%x (DMA)\n", | |
68 | (unsigned int) seg->trbs, (u32) seg->dma); | |
69 | dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); | |
70 | seg->trbs = NULL; | |
71 | } | |
72 | xhci_dbg(xhci, "Freeing priv segment structure at 0x%x\n", | |
73 | (unsigned int) seg); | |
74 | kfree(seg); | |
75 | } | |
76 | ||
77 | /* | |
78 | * Make the prev segment point to the next segment. | |
79 | * | |
80 | * Change the last TRB in the prev segment to be a Link TRB which points to the | |
81 | * DMA address of the next segment. The caller needs to set any Link TRB | |
82 | * related flags, such as End TRB, Toggle Cycle, and no snoop. | |
83 | */ | |
84 | static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, | |
85 | struct xhci_segment *next, bool link_trbs) | |
86 | { | |
87 | u32 val; | |
88 | ||
89 | if (!prev || !next) | |
90 | return; | |
91 | prev->next = next; | |
92 | if (link_trbs) { | |
93 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma; | |
94 | ||
95 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ | |
96 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; | |
97 | val &= ~TRB_TYPE_BITMASK; | |
98 | val |= TRB_TYPE(TRB_LINK); | |
99 | prev->trbs[TRBS_PER_SEGMENT-1].link.control = val; | |
100 | } | |
101 | xhci_dbg(xhci, "Linking segment 0x%x to segment 0x%x (DMA)\n", | |
102 | prev->dma, next->dma); | |
103 | } | |
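
Aside: the Link TRB manipulation above amounts to storing the next segment's DMA address in the last TRB of the previous segment and rewriting that TRB's type field to "Link". The sketch below is a standalone, userspace-only model of that idea; `struct trb`, `struct segment`, `SEG_TRBS`, and `link_segments()` are invented stand-ins for the kernel types (the type field does sit in bits 15:10 of the control word, and the Link TRB type ID is 6).

```c
/* Userspace-only model of two ring segments chained by a Link TRB, in the
 * spirit of xhci_link_segments().  Field layout and names are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define SEG_TRBS       4          /* stand-in for TRBS_PER_SEGMENT */
#define TRB_TYPE_SHIFT 10         /* TRB type lives in control bits 15:10 */
#define TRB_TYPE_MASK  (0x3fu << TRB_TYPE_SHIFT)
#define TRB_LINK_TYPE  6          /* Link TRB type ID */

struct trb {
	uint64_t ptr;             /* for a Link TRB: DMA address of next segment */
	uint32_t control;
};

struct segment {
	struct trb trbs[SEG_TRBS];
	uint64_t dma;             /* pretend bus address of this segment */
	struct segment *next;
};

static void link_segments(struct segment *prev, struct segment *next)
{
	struct trb *link = &prev->trbs[SEG_TRBS - 1];

	prev->next = next;
	link->ptr = next->dma;
	link->control &= ~TRB_TYPE_MASK;
	link->control |= TRB_LINK_TYPE << TRB_TYPE_SHIFT;
}

int main(void)
{
	struct segment a = { .dma = 0x1000 }, b = { .dma = 0x2000 };

	link_segments(&a, &b);
	link_segments(&b, &a);    /* close the chain back into a ring */

	printf("a links to %#llx, b links to %#llx\n",
	       (unsigned long long)a.trbs[SEG_TRBS - 1].ptr,
	       (unsigned long long)b.trbs[SEG_TRBS - 1].ptr);
	return 0;
}
```

Linking the last segment back to the first, as xhci_ring_alloc() does below, is what turns the chain of segments into a ring.
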
104 | ||
105 | /* XXX: Do we need the hcd structure in all these functions? */ | |
f94e0186 | 106 | void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) |
0ebbab37 SS |
107 | { |
108 | struct xhci_segment *seg; | |
109 | struct xhci_segment *first_seg; | |
110 | ||
111 | if (!ring || !ring->first_seg) | |
112 | return; | |
113 | first_seg = ring->first_seg; | |
114 | seg = first_seg->next; | |
115 | xhci_dbg(xhci, "Freeing ring at 0x%x\n", (unsigned int) ring); | |
116 | while (seg != first_seg) { | |
117 | struct xhci_segment *next = seg->next; | |
118 | xhci_segment_free(xhci, seg); | |
119 | seg = next; | |
120 | } | |
121 | xhci_segment_free(xhci, first_seg); | |
122 | ring->first_seg = NULL; | |
123 | kfree(ring); | |
124 | } | |
125 | ||
126 | /** | |
127 | * Create a new ring with zero or more segments. | |
128 | * | |
129 | * Link each segment together into a ring. | |
130 | * Set the end flag and the cycle toggle bit on the last segment. | |
131 | * See section 4.9.1 and figures 15 and 16. | |
132 | */ | |
133 | static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, | |
134 | unsigned int num_segs, bool link_trbs, gfp_t flags) | |
135 | { | |
136 | struct xhci_ring *ring; | |
137 | struct xhci_segment *prev; | |
138 | ||
139 | ring = kzalloc(sizeof *(ring), flags); | |
140 | xhci_dbg(xhci, "Allocating ring at 0x%x\n", (unsigned int) ring); | |
141 | if (!ring) | |
142 | return 0; | |
143 | ||
d0e96f5a | 144 | INIT_LIST_HEAD(&ring->td_list); |
0ebbab37 SS |
145 | if (num_segs == 0) |
146 | return ring; | |
147 | ||
148 | ring->first_seg = xhci_segment_alloc(xhci, flags); | |
149 | if (!ring->first_seg) | |
150 | goto fail; | |
151 | num_segs--; | |
152 | ||
153 | prev = ring->first_seg; | |
154 | while (num_segs > 0) { | |
155 | struct xhci_segment *next; | |
156 | ||
157 | next = xhci_segment_alloc(xhci, flags); | |
158 | if (!next) | |
159 | goto fail; | |
160 | xhci_link_segments(xhci, prev, next, link_trbs); | |
161 | ||
162 | prev = next; | |
163 | num_segs--; | |
164 | } | |
165 | xhci_link_segments(xhci, prev, ring->first_seg, link_trbs); | |
166 | ||
167 | if (link_trbs) { | |
168 | /* See section 4.9.2.1 and 6.4.4.1 */ | |
169 | prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE); | |
170 | xhci_dbg(xhci, "Wrote link toggle flag to" | |
171 | " segment 0x%x (virtual), 0x%x (DMA)\n", | |
172 | (unsigned int) prev, (u32) prev->dma); | |
173 | } | |
174 | /* The ring is empty, so the enqueue pointer == dequeue pointer */ | |
175 | ring->enqueue = ring->first_seg->trbs; | |
7f84eef0 | 176 | ring->enq_seg = ring->first_seg; |
0ebbab37 | 177 | ring->dequeue = ring->enqueue; |
7f84eef0 | 178 | ring->deq_seg = ring->first_seg; |
0ebbab37 SS |
179 | /* The ring is initialized to 0. The producer must write 1 to the cycle |
180 | * bit to hand over ownership of the TRB, so PCS = 1. The consumer must | 
181 | * compare CCS to the cycle bit to check ownership, so CCS = 1. | |
182 | */ | |
183 | ring->cycle_state = 1; | |
184 | ||
185 | return ring; | |
186 | ||
187 | fail: | |
188 | xhci_ring_free(xhci, ring); | |
189 | return 0; | |
190 | } | |
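
The PCS/CCS comment above is easiest to see with a toy producer/consumer: the producer marks a TRB valid by writing its current cycle state into the TRB, and each side flips its own cycle state when it wraps around the ring. This is a self-contained model with invented names (`produce()`, `consume()`, `RING_TRBS`), not the driver's actual enqueue/dequeue code, and it omits Link TRBs.

```c
/* Toy producer/consumer on a fixed-size TRB ring using a cycle bit for
 * ownership, mirroring the PCS/CCS comment in xhci_ring_alloc(). */
#include <stdio.h>
#include <stdint.h>

#define RING_TRBS 4

struct trb { uint32_t control; };            /* bit 0 = cycle bit */

static struct trb ring[RING_TRBS];           /* zeroed: all cycle bits start at 0 */
static unsigned int enq, deq;
static unsigned int pcs = 1, ccs = 1;        /* producer/consumer cycle state */

static void produce(void)
{
	ring[enq].control = pcs;             /* writing PCS hands the TRB over */
	if (++enq == RING_TRBS) {
		enq = 0;
		pcs ^= 1;                    /* toggle on wrap */
	}
}

static int consume(void)
{
	if ((ring[deq].control & 1) != ccs)
		return -1;                   /* cycle mismatch: nothing new to consume */
	if (++deq == RING_TRBS) {
		deq = 0;
		ccs ^= 1;                    /* toggle on wrap */
	}
	return 0;
}

int main(void)
{
	printf("empty consume: %d\n", consume());   /* -1: ring is empty */
	produce();
	printf("after produce: %d\n", consume());   /* 0: TRB was handed over */
	return 0;
}
```
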
191 | ||
d0e96f5a | 192 | /* All the xhci_tds in the ring's TD list should be freed at this point */ |
3ffbba95 SS |
193 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) |
194 | { | |
195 | struct xhci_virt_device *dev; | |
196 | int i; | |
197 | ||
198 | /* Slot ID 0 is reserved */ | |
199 | if (slot_id == 0 || !xhci->devs[slot_id]) | |
200 | return; | |
201 | ||
202 | dev = xhci->devs[slot_id]; | |
203 | xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0; | |
204 | xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; | |
205 | if (!dev) | |
206 | return; | |
207 | ||
208 | for (i = 0; i < 31; ++i) | |
209 | if (dev->ep_rings[i]) | |
210 | xhci_ring_free(xhci, dev->ep_rings[i]); | |
211 | ||
212 | if (dev->in_ctx) | |
213 | dma_pool_free(xhci->device_pool, | |
214 | dev->in_ctx, dev->in_ctx_dma); | |
215 | if (dev->out_ctx) | |
216 | dma_pool_free(xhci->device_pool, | |
217 | dev->out_ctx, dev->out_ctx_dma); | |
218 | kfree(xhci->devs[slot_id]); | |
219 | xhci->devs[slot_id] = 0; | |
220 | } | |
221 | ||
222 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |
223 | struct usb_device *udev, gfp_t flags) | |
224 | { | |
225 | dma_addr_t dma; | |
226 | struct xhci_virt_device *dev; | |
227 | ||
228 | /* Slot ID 0 is reserved */ | |
229 | if (slot_id == 0 || xhci->devs[slot_id]) { | |
230 | xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); | |
231 | return 0; | |
232 | } | |
233 | ||
234 | xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); | |
235 | if (!xhci->devs[slot_id]) | |
236 | return 0; | |
237 | dev = xhci->devs[slot_id]; | |
238 | ||
239 | /* Allocate the (output) device context that will be used in the HC */ | |
240 | dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); | |
241 | if (!dev->out_ctx) | |
242 | goto fail; | |
243 | dev->out_ctx_dma = dma; | |
244 | xhci_dbg(xhci, "Slot %d output ctx = 0x%x (dma)\n", slot_id, dma); | |
245 | memset(dev->out_ctx, 0, sizeof(*dev->out_ctx)); | |
246 | ||
247 | /* Allocate the (input) device context for address device command */ | |
248 | dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); | |
249 | if (!dev->in_ctx) | |
250 | goto fail; | |
251 | dev->in_ctx_dma = dma; | |
252 | xhci_dbg(xhci, "Slot %d input ctx = 0x%x (dma)\n", slot_id, dma); | |
253 | memset(dev->in_ctx, 0, sizeof(*dev->in_ctx)); | |
254 | ||
255 | /* Allocate endpoint 0 ring */ | |
256 | dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); | |
257 | if (!dev->ep_rings[0]) | |
258 | goto fail; | |
259 | ||
f94e0186 SS |
260 | init_completion(&dev->cmd_completion); |
261 | ||
3ffbba95 SS |
262 | /* |
263 | * Point to output device context in dcbaa; skip the output control | |
264 | * context, which is eight 32 bit fields (or 32 bytes long) | |
265 | */ | |
266 | xhci->dcbaa->dev_context_ptrs[2*slot_id] = | |
267 | (u32) dev->out_ctx_dma + (32); | |
268 | xhci_dbg(xhci, "Set slot id %d dcbaa entry 0x%x to 0x%x\n", | |
269 | slot_id, | |
270 | (unsigned int) &xhci->dcbaa->dev_context_ptrs[2*slot_id], | |
271 | dev->out_ctx_dma); | |
272 | xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; | |
273 | ||
274 | return 1; | |
275 | fail: | |
276 | xhci_free_virt_device(xhci, slot_id); | |
277 | return 0; | |
278 | } | |
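
The `2*slot_id` / `2*slot_id + 1` indexing above comes from dev_context_ptrs[] being declared as 32-bit words, two per 64-bit DCBAA entry. A small standalone illustration of that addressing follows; the array size and helper name are made up for the sketch.

```c
/* Userspace model of the DCBAA addressing used above: the array is declared
 * as 32-bit words, so each 64-bit entry occupies indices 2*slot_id (low word)
 * and 2*slot_id + 1 (high word). */
#include <stdio.h>
#include <stdint.h>

#define MAX_SLOTS 8   /* illustrative; the real limit comes from HCSPARAMS1 */

static uint32_t dev_context_ptrs[2 * MAX_SLOTS];

static void set_dcbaa_entry(int slot_id, uint64_t ctx_dma)
{
	dev_context_ptrs[2 * slot_id]     = (uint32_t)ctx_dma;          /* low 32 bits */
	dev_context_ptrs[2 * slot_id + 1] = (uint32_t)(ctx_dma >> 32);  /* high 32 bits */
}

int main(void)
{
	set_dcbaa_entry(1, 0x12345678ULL);
	printf("slot 1 entry: low=%#x high=%#x\n",
	       dev_context_ptrs[2], dev_context_ptrs[3]);
	return 0;
}
```
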
279 | ||
280 | /* Setup an xHCI virtual device for a Set Address command */ | |
281 | int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) | |
282 | { | |
283 | struct xhci_virt_device *dev; | |
284 | struct xhci_ep_ctx *ep0_ctx; | |
285 | struct usb_device *top_dev; | |
286 | ||
287 | dev = xhci->devs[udev->slot_id]; | |
288 | /* Slot ID 0 is reserved */ | |
289 | if (udev->slot_id == 0 || !dev) { | |
290 | xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", | |
291 | udev->slot_id); | |
292 | return -EINVAL; | |
293 | } | |
294 | ep0_ctx = &dev->in_ctx->ep[0]; | |
295 | ||
296 | /* 2) New slot context and endpoint 0 context are valid */ | 
297 | dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; | |
298 | ||
299 | /* 3) Only the control endpoint is valid - one endpoint context */ | |
300 | dev->in_ctx->slot.dev_info |= LAST_CTX(1); | |
301 | ||
302 | switch (udev->speed) { | |
303 | case USB_SPEED_SUPER: | |
304 | dev->in_ctx->slot.dev_info |= (u32) udev->route; | |
305 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; | |
306 | break; | |
307 | case USB_SPEED_HIGH: | |
308 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; | |
309 | break; | |
310 | case USB_SPEED_FULL: | |
311 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; | |
312 | break; | |
313 | case USB_SPEED_LOW: | |
314 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; | |
315 | break; | |
316 | case USB_SPEED_VARIABLE: | |
317 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); | |
318 | return -EINVAL; | |
319 | break; | |
320 | default: | |
321 | /* Speed was set earlier, this shouldn't happen. */ | |
322 | BUG(); | |
323 | } | |
324 | /* Find the root hub port this device is under */ | |
325 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; | |
326 | top_dev = top_dev->parent) | |
327 | /* Found device below root hub */; | |
328 | dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); | |
329 | xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); | |
330 | ||
331 | /* Is this a LS/FS device under a HS hub? */ | |
332 | /* | |
333 | * FIXME: I don't think this is right, where does the TT info for the | |
334 | * roothub or parent hub come from? | |
335 | */ | |
336 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && | |
337 | udev->tt) { | |
338 | dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; | |
339 | dev->in_ctx->slot.tt_info |= udev->ttport << 8; | |
340 | } | |
341 | xhci_dbg(xhci, "udev->tt = 0x%x\n", (unsigned int) udev->tt); | |
342 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); | |
343 | ||
344 | /* Step 4 - ring already allocated */ | |
345 | /* Step 5 */ | |
346 | ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP); | |
347 | /* | |
348 | * See section 4.3 bullet 6: | |
349 | * The default Max Packet size for ep0 is "8 bytes for a USB2 | |
350 | * LS/FS/HS device or 512 bytes for a USB3 SS device" | |
351 | * XXX: Not sure about wireless USB devices. | |
352 | */ | |
353 | if (udev->speed == USB_SPEED_SUPER) | |
354 | ep0_ctx->ep_info2 |= MAX_PACKET(512); | |
355 | else | |
356 | ep0_ctx->ep_info2 |= MAX_PACKET(8); | |
357 | /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ | |
358 | ep0_ctx->ep_info2 |= MAX_BURST(0); | |
359 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); | |
360 | ||
361 | ep0_ctx->deq[0] = | |
362 | dev->ep_rings[0]->first_seg->dma; | |
363 | ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state; | |
364 | ep0_ctx->deq[1] = 0; | |
365 | ||
366 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ | |
367 | ||
368 | return 0; | |
369 | } | |
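
The empty-bodied for loop that locates the root hub port is easy to misread; it simply walks parent pointers until it reaches the device sitting directly below the root hub (the one whose parent has no parent). A standalone restatement with an invented `struct dev` and test topology:

```c
/* Walk up the device tree to the device directly below the root hub and
 * return the root-hub port it hangs off, mirroring the loop above. */
#include <stdio.h>

struct dev {
	struct dev *parent;   /* NULL for the root hub itself */
	int portnum;          /* port on the parent this device hangs off */
	const char *name;
};

static int root_hub_port(struct dev *udev)
{
	struct dev *top = udev;

	/* Stop at the device whose parent is the root hub (parent->parent == NULL). */
	while (top->parent && top->parent->parent)
		top = top->parent;
	return top->portnum;
}

int main(void)
{
	struct dev root  = { .parent = NULL,  .portnum = 0, .name = "root hub" };
	struct dev hub   = { .parent = &root, .portnum = 3, .name = "external hub" };
	struct dev mouse = { .parent = &hub,  .portnum = 2, .name = "mouse" };

	printf("%s is under root hub port %d\n", mouse.name, root_hub_port(&mouse));
	return 0;
}
```
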
370 | ||
f94e0186 SS |
371 | /* Return the polling or NAK interval. |
372 | * | |
373 | * The polling interval is expressed in "microframes". If xHCI's Interval field | |
374 | * is set to N, the host controller will service the endpoint every 2^N * 125us. | 
375 | * | |
376 | * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval | |
377 | * is set to 0. | |
378 | */ | |
379 | static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, | |
380 | struct usb_host_endpoint *ep) | |
381 | { | |
382 | unsigned int interval = 0; | |
383 | ||
384 | switch (udev->speed) { | |
385 | case USB_SPEED_HIGH: | |
386 | /* Max NAK rate */ | |
387 | if (usb_endpoint_xfer_control(&ep->desc) || | |
388 | usb_endpoint_xfer_bulk(&ep->desc)) | |
389 | interval = ep->desc.bInterval; | |
390 | /* Fall through - SS and HS isoc/int have same decoding */ | |
391 | case USB_SPEED_SUPER: | |
392 | if (usb_endpoint_xfer_int(&ep->desc) || | |
393 | usb_endpoint_xfer_isoc(&ep->desc)) { | |
394 | if (ep->desc.bInterval == 0) | |
395 | interval = 0; | |
396 | else | |
397 | interval = ep->desc.bInterval - 1; | |
398 | if (interval > 15) | |
399 | interval = 15; | |
400 | if (interval != ep->desc.bInterval - 1) | 
401 | dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", | |
402 | ep->desc.bEndpointAddress, 1 << interval); | |
403 | } | |
404 | break; | |
405 | /* Convert bInterval (in 1-255 frames) to microframes and round down to | |
406 | * nearest power of 2. | |
407 | */ | |
408 | case USB_SPEED_FULL: | |
409 | case USB_SPEED_LOW: | |
410 | if (usb_endpoint_xfer_int(&ep->desc) || | |
411 | usb_endpoint_xfer_isoc(&ep->desc)) { | |
412 | interval = fls(8*ep->desc.bInterval) - 1; | |
413 | if (interval > 10) | |
414 | interval = 10; | |
415 | if (interval < 3) | |
416 | interval = 3; | |
417 | if ((1 << interval) != 8*ep->desc.bInterval) | |
418 | dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", | |
419 | ep->desc.bEndpointAddress, 1 << interval); | |
420 | } | |
421 | break; | |
422 | default: | |
423 | BUG(); | |
424 | } | |
425 | return EP_INTERVAL(interval); | |
426 | } | |
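
A worked restatement of the two encodings may help: for HS/SS periodic endpoints the Interval field is bInterval - 1 (capped at 15); for FS/LS it is the bInterval frame count converted to microframes and rounded down to a power of two, clamped to [3, 10]. The sketch below is plain arithmetic with a userspace stand-in for the kernel's fls(); the example values are illustrative.

```c
/* Encode bInterval into the xHCI Interval field the same way the function
 * above does.  Pure arithmetic, no kernel dependencies. */
#include <stdio.h>

/* Stand-in for the kernel's fls(): index of the highest set bit, 1-based. */
static int fls_sketch(unsigned int v)
{
	int n = 0;
	while (v) { n++; v >>= 1; }
	return n;
}

static unsigned int hs_ss_interval(unsigned int bInterval)
{
	unsigned int interval = bInterval ? bInterval - 1 : 0;
	return interval > 15 ? 15 : interval;
}

static unsigned int fs_ls_interval(unsigned int bInterval)
{
	unsigned int interval = fls_sketch(8 * bInterval) - 1;
	if (interval > 10) interval = 10;
	if (interval < 3)  interval = 3;
	return interval;
}

int main(void)
{
	/* HS interrupt ep, bInterval 4 -> Interval 3 -> 2^3 * 125us = 1ms period */
	printf("HS bInterval 4  -> Interval %u\n", hs_ss_interval(4));
	/* FS interrupt ep, bInterval 10 frames -> 80 uframes -> Interval 6 (64 uframes) */
	printf("FS bInterval 10 -> Interval %u\n", fs_ls_interval(10));
	return 0;
}
```
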
427 | ||
428 | static inline u32 xhci_get_endpoint_type(struct usb_device *udev, | |
429 | struct usb_host_endpoint *ep) | |
430 | { | |
431 | int in; | |
432 | u32 type; | |
433 | ||
434 | in = usb_endpoint_dir_in(&ep->desc); | |
435 | if (usb_endpoint_xfer_control(&ep->desc)) { | |
436 | type = EP_TYPE(CTRL_EP); | |
437 | } else if (usb_endpoint_xfer_bulk(&ep->desc)) { | |
438 | if (in) | |
439 | type = EP_TYPE(BULK_IN_EP); | |
440 | else | |
441 | type = EP_TYPE(BULK_OUT_EP); | |
442 | } else if (usb_endpoint_xfer_isoc(&ep->desc)) { | |
443 | if (in) | |
444 | type = EP_TYPE(ISOC_IN_EP); | |
445 | else | |
446 | type = EP_TYPE(ISOC_OUT_EP); | |
447 | } else if (usb_endpoint_xfer_int(&ep->desc)) { | |
448 | if (in) | |
449 | type = EP_TYPE(INT_IN_EP); | |
450 | else | |
451 | type = EP_TYPE(INT_OUT_EP); | |
452 | } else { | |
453 | BUG(); | |
454 | } | |
455 | return type; | |
456 | } | |
457 | ||
458 | int xhci_endpoint_init(struct xhci_hcd *xhci, | |
459 | struct xhci_virt_device *virt_dev, | |
460 | struct usb_device *udev, | |
461 | struct usb_host_endpoint *ep) | |
462 | { | |
463 | unsigned int ep_index; | |
464 | struct xhci_ep_ctx *ep_ctx; | |
465 | struct xhci_ring *ep_ring; | |
466 | unsigned int max_packet; | |
467 | unsigned int max_burst; | |
468 | ||
469 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
470 | ep_ctx = &virt_dev->in_ctx->ep[ep_index]; | |
471 | ||
472 | /* Set up the endpoint ring */ | |
473 | virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, GFP_KERNEL); | |
474 | if (!virt_dev->new_ep_rings[ep_index]) | |
475 | return -ENOMEM; | |
476 | ep_ring = virt_dev->new_ep_rings[ep_index]; | |
477 | ep_ctx->deq[1] = 0; | |
478 | ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state; | |
479 | ||
480 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); | |
481 | ||
482 | /* FIXME dig Mult and streams info out of ep companion desc */ | |
483 | ||
484 | /* Allow 3 retries for everything but isoc */ | |
485 | if (!usb_endpoint_xfer_isoc(&ep->desc)) | |
486 | ep_ctx->ep_info2 = ERROR_COUNT(3); | |
487 | else | |
488 | ep_ctx->ep_info2 = ERROR_COUNT(0); | |
489 | ||
490 | ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); | |
491 | ||
492 | /* Set the max packet size and max burst */ | |
493 | switch (udev->speed) { | |
494 | case USB_SPEED_SUPER: | |
495 | max_packet = ep->desc.wMaxPacketSize; | |
496 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | |
497 | /* FIXME dig out burst from ep companion desc */ | |
498 | break; | |
499 | case USB_SPEED_HIGH: | |
500 | /* bits 11:12 specify the number of additional transaction | |
501 | * opportunities per microframe (USB 2.0, section 9.6.6) | |
502 | */ | |
503 | if (usb_endpoint_xfer_isoc(&ep->desc) || | |
504 | usb_endpoint_xfer_int(&ep->desc)) { | |
505 | max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; | |
506 | ep_ctx->ep_info2 |= MAX_BURST(max_burst); | |
507 | } | |
508 | /* Fall through */ | |
509 | case USB_SPEED_FULL: | |
510 | case USB_SPEED_LOW: | |
511 | max_packet = ep->desc.wMaxPacketSize & 0x3ff; | |
512 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | |
513 | break; | |
514 | default: | |
515 | BUG(); | |
516 | } | |
517 | /* FIXME Debug endpoint context */ | |
518 | return 0; | |
519 | } | |
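
For the high-speed periodic case above, wMaxPacketSize carries both the packet size and the extra-transaction count that becomes Max Burst. A standalone decode follows; it masks bits 10:0 per the USB 2.0 descriptor layout (the code above masks with 0x3ff), and the example values are made up.

```c
/* Decode a high-speed periodic endpoint's wMaxPacketSize: bits 10:0 carry the
 * packet size, bits 12:11 the number of additional transaction opportunities
 * per microframe, which becomes the endpoint context's Max Burst. */
#include <stdio.h>
#include <stdint.h>

static void decode_wmaxpacket(uint16_t wMaxPacketSize)
{
	unsigned int max_packet = wMaxPacketSize & 0x7ff;
	unsigned int max_burst  = (wMaxPacketSize & 0x1800) >> 11;

	printf("wMaxPacketSize %#06x -> max packet %u, max burst %u "
	       "(%u transaction(s) per microframe)\n",
	       wMaxPacketSize, max_packet, max_burst, max_burst + 1);
}

int main(void)
{
	decode_wmaxpacket(0x0200);   /* 512 bytes, no extra transactions */
	decode_wmaxpacket(0x1400);   /* 1024 bytes, 2 extra transactions */
	return 0;
}
```
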
520 | ||
521 | void xhci_endpoint_zero(struct xhci_hcd *xhci, | |
522 | struct xhci_virt_device *virt_dev, | |
523 | struct usb_host_endpoint *ep) | |
524 | { | |
525 | unsigned int ep_index; | |
526 | struct xhci_ep_ctx *ep_ctx; | |
527 | ||
528 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
529 | ep_ctx = &virt_dev->in_ctx->ep[ep_index]; | |
530 | ||
531 | ep_ctx->ep_info = 0; | |
532 | ep_ctx->ep_info2 = 0; | |
533 | ep_ctx->deq[1] = 0; | |
534 | ep_ctx->deq[0] = 0; | |
535 | ep_ctx->tx_info = 0; | |
536 | /* Don't free the endpoint ring until the set interface or configuration | |
537 | * request succeeds. | |
538 | */ | |
539 | } | |
540 | ||
66d4eadd SS |
541 | void xhci_mem_cleanup(struct xhci_hcd *xhci) |
542 | { | |
0ebbab37 SS |
543 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
544 | int size; | |
3ffbba95 | 545 | int i; |
0ebbab37 SS |
546 | |
547 | /* Free the Event Ring Segment Table and the actual Event Ring */ | |
548 | xhci_writel(xhci, 0, &xhci->ir_set->erst_size); | |
549 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); | |
550 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]); | |
551 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); | |
552 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]); | |
553 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); | |
554 | if (xhci->erst.entries) | |
555 | pci_free_consistent(pdev, size, | |
556 | xhci->erst.entries, xhci->erst.erst_dma_addr); | |
557 | xhci->erst.entries = NULL; | |
558 | xhci_dbg(xhci, "Freed ERST\n"); | |
559 | if (xhci->event_ring) | |
560 | xhci_ring_free(xhci, xhci->event_ring); | |
561 | xhci->event_ring = NULL; | |
562 | xhci_dbg(xhci, "Freed event ring\n"); | |
563 | ||
564 | xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]); | |
565 | xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]); | |
566 | if (xhci->cmd_ring) | |
567 | xhci_ring_free(xhci, xhci->cmd_ring); | |
568 | xhci->cmd_ring = NULL; | |
569 | xhci_dbg(xhci, "Freed command ring\n"); | |
3ffbba95 SS |
570 | |
571 | for (i = 1; i < MAX_HC_SLOTS; ++i) | |
572 | xhci_free_virt_device(xhci, i); | |
573 | ||
0ebbab37 SS |
574 | if (xhci->segment_pool) |
575 | dma_pool_destroy(xhci->segment_pool); | |
576 | xhci->segment_pool = NULL; | |
577 | xhci_dbg(xhci, "Freed segment pool\n"); | |
3ffbba95 SS |
578 | |
579 | if (xhci->device_pool) | |
580 | dma_pool_destroy(xhci->device_pool); | |
581 | xhci->device_pool = NULL; | |
582 | xhci_dbg(xhci, "Freed device context pool\n"); | |
583 | ||
a74588f9 SS |
584 | xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]); |
585 | xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); | |
586 | if (xhci->dcbaa) | |
587 | pci_free_consistent(pdev, sizeof(*xhci->dcbaa), | |
588 | xhci->dcbaa, xhci->dcbaa->dma); | |
589 | xhci->dcbaa = NULL; | |
3ffbba95 | 590 | |
66d4eadd SS |
591 | xhci->page_size = 0; |
592 | xhci->page_shift = 0; | |
593 | } | |
594 | ||
595 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |
596 | { | |
0ebbab37 SS |
597 | dma_addr_t dma; |
598 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | |
66d4eadd | 599 | unsigned int val, val2; |
0ebbab37 | 600 | struct xhci_segment *seg; |
66d4eadd SS |
601 | u32 page_size; |
602 | int i; | |
603 | ||
604 | page_size = xhci_readl(xhci, &xhci->op_regs->page_size); | |
605 | xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); | |
606 | for (i = 0; i < 16; i++) { | |
607 | if ((0x1 & page_size) != 0) | |
608 | break; | |
609 | page_size = page_size >> 1; | |
610 | } | |
611 | if (i < 16) | |
612 | xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024); | |
613 | else | |
614 | xhci_warn(xhci, "WARN: no supported page size\n"); | |
615 | /* Use 4K pages, since that's common and the minimum the HC supports */ | |
616 | xhci->page_shift = 12; | |
617 | xhci->page_size = 1 << xhci->page_shift; | |
618 | xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024); | |
619 | ||
620 | /* | |
621 | * Program the Number of Device Slots Enabled field in the CONFIG | |
622 | * register with the max value of slots the HC can handle. | |
623 | */ | |
624 | val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); | |
625 | xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n", | |
626 | (unsigned int) val); | |
627 | val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); | |
628 | val |= (val2 & ~HCS_SLOTS_MASK); | |
629 | xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n", | |
630 | (unsigned int) val); | |
631 | xhci_writel(xhci, val, &xhci->op_regs->config_reg); | |
632 | ||
a74588f9 SS |
633 | /* |
634 | * Section 5.4.8 - doorbell array must be | |
635 | * "physically contiguous and 64-byte (cache line) aligned". | |
636 | */ | |
637 | xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev), | |
638 | sizeof(*xhci->dcbaa), &dma); | |
639 | if (!xhci->dcbaa) | |
640 | goto fail; | |
641 | memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); | |
642 | xhci->dcbaa->dma = dma; | |
3ffbba95 SS |
643 | xhci_dbg(xhci, "// Device context base array address = 0x%x (DMA), 0x%x (virt)\n", |
644 | xhci->dcbaa->dma, (unsigned int) xhci->dcbaa); | |
a74588f9 SS |
645 | xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]); |
646 | xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); | |
647 | ||
0ebbab37 SS |
648 | /* |
649 | * Initialize the ring segment pool. The ring must be a contiguous | |
650 | * structure composed of TRBs. TRBs need only be 16-byte aligned, | 
651 | * but the command ring segments must be 64-byte aligned, | 
652 | * so we pick the greater alignment requirement. | 
653 | */ | |
654 | xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, | |
655 | SEGMENT_SIZE, 64, xhci->page_size); | |
3ffbba95 SS |
656 | /* See Table 46 and Note on Figure 55 */ |
657 | /* FIXME support 64-byte contexts */ | |
658 | xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, | |
659 | sizeof(struct xhci_device_control), | |
660 | 64, xhci->page_size); | |
661 | if (!xhci->segment_pool || !xhci->device_pool) | |
0ebbab37 SS |
662 | goto fail; |
663 | ||
664 | /* Set up the command ring to have one segment for now. */ | 
665 | xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags); | |
666 | if (!xhci->cmd_ring) | |
667 | goto fail; | |
668 | xhci_dbg(xhci, "Allocated command ring at 0x%x\n", (unsigned int) xhci->cmd_ring); | |
669 | xhci_dbg(xhci, "First segment DMA is 0x%x\n", (unsigned int) xhci->cmd_ring->first_seg->dma); | |
670 | ||
671 | /* Set the address in the Command Ring Control register */ | |
672 | val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); | |
673 | val = (val & ~CMD_RING_ADDR_MASK) | | |
674 | (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) | | |
675 | xhci->cmd_ring->cycle_state; | |
676 | xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n"); | |
677 | xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]); | |
678 | xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val); | |
679 | xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]); | |
680 | xhci_dbg_cmd_ptrs(xhci); | |
681 | ||
682 | val = xhci_readl(xhci, &xhci->cap_regs->db_off); | |
683 | val &= DBOFF_MASK; | |
684 | xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" | |
685 | " from cap regs base addr\n", val); | |
686 | xhci->dba = (void *) xhci->cap_regs + val; | |
687 | xhci_dbg_regs(xhci); | |
688 | xhci_print_run_regs(xhci); | |
689 | /* Set ir_set to interrupt register set 0 */ | |
690 | xhci->ir_set = (void *) xhci->run_regs->ir_set; | |
691 | ||
692 | /* | |
693 | * Event ring setup: Allocate a normal ring, but also setup | |
694 | * the event ring segment table (ERST). Section 4.9.3. | |
695 | */ | |
696 | xhci_dbg(xhci, "// Allocating event ring\n"); | |
697 | xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags); | |
698 | if (!xhci->event_ring) | |
699 | goto fail; | |
700 | ||
701 | xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev), | |
702 | sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma); | |
703 | if (!xhci->erst.entries) | |
704 | goto fail; | |
705 | xhci_dbg(xhci, "// Allocated event ring segment table at 0x%x\n", dma); | |
706 | ||
707 | memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); | |
708 | xhci->erst.num_entries = ERST_NUM_SEGS; | |
709 | xhci->erst.erst_dma_addr = dma; | |
710 | xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = 0x%x, dma addr = 0x%x\n", | |
711 | xhci->erst.num_entries, | |
712 | (unsigned int) xhci->erst.entries, | |
713 | xhci->erst.erst_dma_addr); | |
714 | ||
715 | /* set ring base address and size for each segment table entry */ | |
716 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { | |
717 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; | |
718 | entry->seg_addr[1] = 0; | |
719 | entry->seg_addr[0] = seg->dma; | |
720 | entry->seg_size = TRBS_PER_SEGMENT; | |
721 | entry->rsvd = 0; | |
722 | seg = seg->next; | |
723 | } | |
724 | ||
725 | /* set ERST count with the number of entries in the segment table */ | |
726 | val = xhci_readl(xhci, &xhci->ir_set->erst_size); | |
727 | val &= ERST_SIZE_MASK; | |
728 | val |= ERST_NUM_SEGS; | |
729 | xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n", | |
730 | val); | |
731 | xhci_writel(xhci, val, &xhci->ir_set->erst_size); | |
732 | ||
733 | xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n"); | |
734 | /* set the segment table base address */ | |
735 | xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%x\n", | |
736 | xhci->erst.erst_dma_addr); | |
737 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); | |
738 | val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]); | |
739 | val &= ERST_PTR_MASK; | |
740 | val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK); | |
741 | xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]); | |
742 | ||
743 | /* Set the event ring dequeue address */ | |
7f84eef0 | 744 | set_hc_event_deq(xhci); |
0ebbab37 SS |
745 | xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); |
746 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | |
747 | ||
748 | /* | |
749 | * XXX: Might need to set the Interrupter Moderation Register to | |
750 | * something other than the default (~1ms minimum between interrupts). | |
751 | * See section 5.5.1.2. | |
752 | */ | |
3ffbba95 SS |
753 | init_completion(&xhci->addr_dev); |
754 | for (i = 0; i < MAX_HC_SLOTS; ++i) | |
755 | xhci->devs[i] = 0; | |
66d4eadd SS |
756 | |
757 | return 0; | |
758 | fail: | |
759 | xhci_warn(xhci, "Couldn't initialize memory\n"); | |
760 | xhci_mem_cleanup(xhci); | |
761 | return -ENOMEM; | |
762 | } |
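
Finally, the PAGESIZE probing loop near the top of xhci_mem_init() decodes a one-hot register: bit n set means the controller supports 2^(n+12)-byte pages. A standalone restatement with an arbitrary register value (the real value is read from the operational registers):

```c
/* Decode the xHCI PAGESIZE register the way xhci_mem_init() does: find the
 * lowest set bit n; the corresponding page size is 2^(n+12) bytes. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t page_size = 0x1;   /* example: only bit 0 set -> 4K pages */
	int i;

	for (i = 0; i < 16; i++) {
		if (page_size & 1)
			break;
		page_size >>= 1;
	}
	if (i < 16)
		printf("smallest supported page size: %dK\n", (1 << (i + 12)) / 1024);
	else
		printf("no supported page size advertised\n");
	return 0;
}
```
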