Commit | Line | Data |
---|---|---|
66d4eadd SS |
1 | /* |
2 | * xHCI host controller driver | |
3 | * | |
4 | * Copyright (C) 2008 Intel Corp. | |
5 | * | |
6 | * Author: Sarah Sharp | |
7 | * Some code borrowed from the Linux EHCI driver. | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
15 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 | * for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software Foundation, | |
20 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
21 | */ | |
22 | ||
23 | #include <linux/usb.h> | |
0ebbab37 | 24 | #include <linux/pci.h> |
66d4eadd SS |
25 | |
26 | #include "xhci.h" | |
27 | ||
0ebbab37 SS |
28 | /* |
29 | * Allocates a generic ring segment from the ring pool, sets the dma address, | |
30 | * initializes the segment to zero, and sets the private next pointer to NULL. | |
31 | * | |
32 | * Section 4.11.1.1: | |
33 | * "All components of all Command and Transfer TRBs shall be initialized to '0'" | |
34 | */ | |
35 | static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags) | |
36 | { | |
37 | struct xhci_segment *seg; | |
38 | dma_addr_t dma; | |
39 | ||
40 | seg = kzalloc(sizeof *seg, flags); | |
41 | if (!seg) | |
42 | return 0; | |
43 | xhci_dbg(xhci, "Allocating priv segment structure at 0x%x\n", | |
44 | (unsigned int) seg); | |
45 | ||
46 | seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); | |
47 | if (!seg->trbs) { | |
48 | kfree(seg); | |
49 | return 0; | |
50 | } | |
51 | xhci_dbg(xhci, "// Allocating segment at 0x%x (virtual) 0x%x (DMA)\n", | |
52 | (unsigned int) seg->trbs, (u32) dma); | |
53 | ||
54 | memset(seg->trbs, 0, SEGMENT_SIZE); | |
55 | seg->dma = dma; | |
56 | seg->next = NULL; | |
57 | ||
58 | return seg; | |
59 | } | |
60 | ||
61 | static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) | |
62 | { | |
63 | if (!seg) | |
64 | return; | |
65 | if (seg->trbs) { | |
66 | xhci_dbg(xhci, "Freeing DMA segment at 0x%x" | |
67 | " (virtual) 0x%x (DMA)\n", | |
68 | (unsigned int) seg->trbs, (u32) seg->dma); | |
69 | dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); | |
70 | seg->trbs = NULL; | |
71 | } | |
72 | xhci_dbg(xhci, "Freeing priv segment structure at 0x%x\n", | |
73 | (unsigned int) seg); | |
74 | kfree(seg); | |
75 | } | |
76 | ||
77 | /* | |
78 | * Make the prev segment point to the next segment. | |
79 | * | |
80 | * Change the last TRB in the prev segment to be a Link TRB which points to the | |
81 | * DMA address of the next segment. The caller needs to set any Link TRB | |
82 | * related flags, such as End TRB, Toggle Cycle, and no snoop. | |
83 | */ | |
84 | static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, | |
85 | struct xhci_segment *next, bool link_trbs) | |
86 | { | |
87 | u32 val; | |
88 | ||
89 | if (!prev || !next) | |
90 | return; | |
91 | prev->next = next; | |
92 | if (link_trbs) { | |
93 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma; | |
94 | ||
95 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ | |
96 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; | |
97 | val &= ~TRB_TYPE_BITMASK; | |
98 | val |= TRB_TYPE(TRB_LINK); | |
99 | prev->trbs[TRBS_PER_SEGMENT-1].link.control = val; | |
100 | } | |
101 | xhci_dbg(xhci, "Linking segment 0x%x to segment 0x%x (DMA)\n", | |
102 | prev->dma, next->dma); | |
103 | } | |
104 | ||
105 | /* XXX: Do we need the hcd structure in all these functions? */ | |
106 | static void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) | |
107 | { | |
108 | struct xhci_segment *seg; | |
109 | struct xhci_segment *first_seg; | |
110 | ||
111 | if (!ring || !ring->first_seg) | |
112 | return; | |
113 | first_seg = ring->first_seg; | |
114 | seg = first_seg->next; | |
115 | xhci_dbg(xhci, "Freeing ring at 0x%x\n", (unsigned int) ring); | |
116 | while (seg != first_seg) { | |
117 | struct xhci_segment *next = seg->next; | |
118 | xhci_segment_free(xhci, seg); | |
119 | seg = next; | |
120 | } | |
121 | xhci_segment_free(xhci, first_seg); | |
122 | ring->first_seg = NULL; | |
123 | kfree(ring); | |
124 | } | |
125 | ||
126 | /** | |
127 | * Create a new ring with zero or more segments. | |
128 | * | |
129 | * Link each segment together into a ring. | |
130 | * Set the end flag and the cycle toggle bit on the last segment. | |
131 | * See section 4.9.1 and figures 15 and 16. | |
132 | */ | |
133 | static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, | |
134 | unsigned int num_segs, bool link_trbs, gfp_t flags) | |
135 | { | |
136 | struct xhci_ring *ring; | |
137 | struct xhci_segment *prev; | |
138 | ||
139 | ring = kzalloc(sizeof *(ring), flags); | |
140 | xhci_dbg(xhci, "Allocating ring at 0x%x\n", (unsigned int) ring); | |
141 | if (!ring) | |
142 | return 0; | |
143 | ||
144 | if (num_segs == 0) | |
145 | return ring; | |
146 | ||
147 | ring->first_seg = xhci_segment_alloc(xhci, flags); | |
148 | if (!ring->first_seg) | |
149 | goto fail; | |
150 | num_segs--; | |
151 | ||
152 | prev = ring->first_seg; | |
153 | while (num_segs > 0) { | |
154 | struct xhci_segment *next; | |
155 | ||
156 | next = xhci_segment_alloc(xhci, flags); | |
157 | if (!next) | |
158 | goto fail; | |
159 | xhci_link_segments(xhci, prev, next, link_trbs); | |
160 | ||
161 | prev = next; | |
162 | num_segs--; | |
163 | } | |
164 | xhci_link_segments(xhci, prev, ring->first_seg, link_trbs); | |
165 | ||
166 | if (link_trbs) { | |
167 | /* See section 4.9.2.1 and 6.4.4.1 */ | |
168 | prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE); | |
169 | xhci_dbg(xhci, "Wrote link toggle flag to" | |
170 | " segment 0x%x (virtual), 0x%x (DMA)\n", | |
171 | (unsigned int) prev, (u32) prev->dma); | |
172 | } | |
173 | /* The ring is empty, so the enqueue pointer == dequeue pointer */ | |
174 | ring->enqueue = ring->first_seg->trbs; | |
175 | ring->dequeue = ring->enqueue; | |
176 | /* The ring is initialized to 0. The producer must write 1 to the cycle | |
177 | * bit to handover ownership of the TRB, so PCS = 1. The consumer must | |
178 | * compare CCS to the cycle bit to check ownership, so CCS = 1. | |
179 | */ | |
180 | ring->cycle_state = 1; | |
181 | ||
182 | return ring; | |
183 | ||
184 | fail: | |
185 | xhci_ring_free(xhci, ring); | |
186 | return 0; | |
187 | } | |
188 | ||
66d4eadd SS |
/*
 * Tear down everything xhci_mem_init() set up, in reverse order.
 *
 * Each register is zeroed BEFORE the memory it pointed at is freed, so the
 * controller is never left holding a bus address of freed memory.  All the
 * xhci pointer fields are NULLed afterwards, which makes this function safe
 * to call on a partially initialized xhci (it is the fail path of
 * xhci_mem_init() as well as the normal teardown path).
 */
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;

	/* XXX: Free all the segments in the various rings */

	/* Free the Event Ring Segment Table and the actual Event Ring */
	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
	/* 64-bit registers are written as two 32-bit halves, high half first */
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	/* Clear the command ring control register before freeing the ring */
	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");
	/* The segment pool must outlive every ring; destroy it only after all
	 * rings have returned their segments above.
	 */
	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");
	/* Finally, drop the device context base address array (DCBAA) */
	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;
	xhci->page_size = 0;
	xhci->page_shift = 0;
}
232 | ||
/*
 * Allocate and wire up all host-controller data structures:
 * page size, max device slots, the device context base address array
 * (DCBAA), the ring segment DMA pool, the command ring, the doorbell
 * array pointer, and the event ring plus its segment table (ERST).
 *
 * Returns 0 on success or -ENOMEM on any allocation failure, in which
 * case xhci_mem_cleanup() has already undone the partial setup.
 */
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int val, val2;
	struct xhci_segment *seg;
	u32 page_size;
	int i;

	/* Each set bit n in the PAGESIZE register means the HC supports
	 * 2^(n+12) byte pages; scan for the lowest supported size.
	 */
	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	/* Preserve the CONFIG register bits outside the slots field */
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 * NOTE(review): this allocates the device context base address array
	 * (DCBAA), not the doorbell array — the section reference looks like
	 * it belongs to a different paragraph; confirm against the xHCI spec.
	 */
	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(*xhci->dcbaa), &dma);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Setting device context base array address to 0x%x\n",
			xhci->dcbaa->dma);
	/* 64-bit register: write high dword (0 - assumes 32-bit DMA), then low */
	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);
	if (!xhci->segment_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at 0x%x\n", (unsigned int) xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%x\n", (unsigned int) xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
	/* Keep reserved bits, install the ring address and initial cycle state */
	val = (val & ~CMD_RING_ADDR_MASK) |
		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
	xhci_dbg_cmd_ptrs(xhci);

	/* The doorbell array lives at an offset from the capability registers */
	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	/* Event ring segments are NOT chained with link TRBs (link_trbs=false);
	 * the HC walks them via the ERST instead.
	 */
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;

	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%x\n", dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = 0x%x, dma addr = 0x%x\n",
			xhci->erst.num_entries,
			(unsigned int) xhci->erst.entries,
			xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr[1] = 0;
		entry->seg_addr[0] = seg->dma;
		entry->seg_size = TRBS_PER_SEGMENT;
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%x\n",
			xhci->erst.erst_dma_addr);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
	/* Read-modify-write: the low bits of erst_base are reserved/preserved */
	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
	val &= ERST_PTR_MASK;
	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);

	/* Set the event ring dequeue address */
	xhci_dbg(xhci, "// Set ERST dequeue address for ir_set 0 = 0x%x%x\n",
			xhci->erst.entries[0].seg_addr[1], xhci->erst.entries[0].seg_addr[0]);
	val = xhci_readl(xhci, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
	val &= ERST_PTR_MASK;
	/* Dequeue pointer starts at the first TRB of the first event segment */
	val |= (xhci->erst.entries[0].seg_addr[0] & ~ERST_PTR_MASK);
	xhci_writel(xhci, val, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
	xhci_writel(xhci, xhci->erst.entries[0].seg_addr[1],
			&xhci->run_regs->ir_set[0].erst_dequeue[1]);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */

	return 0;
fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	/* Cleanup tolerates partial init: NULL fields are simply skipped */
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}