Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Universal Host Controller Interface driver for USB. | |
3 | * | |
4 | * Maintainer: Alan Stern <stern@rowland.harvard.edu> | |
5 | * | |
6 | * (C) Copyright 1999 Linus Torvalds | |
7 | * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com | |
8 | * (C) Copyright 1999 Randy Dunlap | |
9 | * (C) Copyright 1999 Georg Acher, acher@in.tum.de | |
10 | * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de | |
11 | * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch | |
12 | * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at | |
13 | * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface | |
14 | * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). | |
15 | * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) | |
4daaa87c | 16 | * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu |
1da177e4 LT |
17 | * |
18 | * Intel documents this fairly well, and as far as I know there | |
19 | * are no royalties or anything like that, but even so there are | |
20 | * people who decided that they want to do the same thing in a | |
21 | * completely different way. | |
22 | * | |
1da177e4 LT |
23 | */ |
24 | ||
25 | #include <linux/config.h> | |
1da177e4 LT |
26 | #include <linux/module.h> |
27 | #include <linux/pci.h> | |
28 | #include <linux/kernel.h> | |
29 | #include <linux/init.h> | |
30 | #include <linux/delay.h> | |
31 | #include <linux/ioport.h> | |
32 | #include <linux/sched.h> | |
33 | #include <linux/slab.h> | |
34 | #include <linux/smp_lock.h> | |
35 | #include <linux/errno.h> | |
36 | #include <linux/unistd.h> | |
37 | #include <linux/interrupt.h> | |
38 | #include <linux/spinlock.h> | |
39 | #include <linux/debugfs.h> | |
40 | #include <linux/pm.h> | |
41 | #include <linux/dmapool.h> | |
42 | #include <linux/dma-mapping.h> | |
43 | #include <linux/usb.h> | |
44 | #include <linux/bitops.h> | |
45 | ||
46 | #include <asm/uaccess.h> | |
47 | #include <asm/io.h> | |
48 | #include <asm/irq.h> | |
49 | #include <asm/system.h> | |
50 | ||
51 | #include "../core/hcd.h" | |
52 | #include "uhci-hcd.h" | |
75e2df60 | 53 | #include "pci-quirks.h" |
1da177e4 LT |
54 | |
55 | /* | |
56 | * Version Information | |
57 | */ | |
dccf4a48 | 58 | #define DRIVER_VERSION "v3.0" |
1da177e4 LT |
59 | #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \ |
60 | Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \ | |
61 | Alan Stern" | |
62 | #define DRIVER_DESC "USB Universal Host Controller Interface driver" | |
63 | ||
64 | /* | |
65 | * debug = 0, no debugging messages | |
687f5f34 AS |
66 | * debug = 1, dump failed URBs except for stalls |
67 | * debug = 2, dump all failed URBs (including stalls) | |
1da177e4 | 68 | * show all queues in /debug/uhci/[pci_addr] |
687f5f34 | 69 | * debug = 3, show all TDs in URBs when dumping |
1da177e4 LT |
70 | */ |
71 | #ifdef DEBUG | |
8d402e1a | 72 | #define DEBUG_CONFIGURED 1 |
1da177e4 | 73 | static int debug = 1; |
1da177e4 LT |
74 | module_param(debug, int, S_IRUGO | S_IWUSR); |
75 | MODULE_PARM_DESC(debug, "Debug level"); | |
8d402e1a AS |
76 | |
77 | #else | |
78 | #define DEBUG_CONFIGURED 0 | |
79 | #define debug 0 | |
80 | #endif | |
81 | ||
1da177e4 LT |
82 | static char *errbuf; |
83 | #define ERRBUF_LEN (32 * 1024) | |
84 | ||
85 | static kmem_cache_t *uhci_up_cachep; /* urb_priv */ | |
86 | ||
6c1b445c AS |
87 | static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state); |
88 | static void wakeup_rh(struct uhci_hcd *uhci); | |
1da177e4 | 89 | static void uhci_get_current_frame_number(struct uhci_hcd *uhci); |
1da177e4 LT |
90 | |
91 | /* If a transfer is still active after this much time, turn off FSBR */ | |
92 | #define IDLE_TIMEOUT msecs_to_jiffies(50) | |
93 | #define FSBR_DELAY msecs_to_jiffies(50) | |
94 | ||
95 | /* When we timeout an idle transfer for FSBR, we'll switch it over to */ | |
687f5f34 | 96 | /* depth first traversal. We'll do it in groups of this number of TDs */ |
1da177e4 LT |
97 | /* to make sure it doesn't hog all of the bandwidth */ |
98 | #define DEPTH_INTERVAL 5 | |
99 | ||
1da177e4 LT |
100 | #include "uhci-debug.c" |
101 | #include "uhci-q.c" | |
1f09df8b | 102 | #include "uhci-hub.c" |
1da177e4 | 103 | |
/*
 * Finish up a host controller reset and update the recorded state.
 * Called after the HC has been reset (by uhci_reset_hc() or
 * uhci_check_and_reset_hc()); brings the driver's bookkeeping into
 * line with the now-halted hardware.
 */
static void finish_reset(struct uhci_hcd *uhci)
{
	int port;

	/* HCRESET doesn't affect the Suspend, Reset, and Resume Detect
	 * bits in the port status and control registers.
	 * We have to clear them by hand.
	 */
	for (port = 0; port < uhci->rh_numports; ++port)
		outw(0, uhci->io_addr + USBPORTSC1 + (port * 2));

	/* No port is suspended or resuming any more */
	uhci->port_c_suspend = uhci->resuming_ports = 0;
	uhci->rh_state = UHCI_RH_RESET;
	uhci->is_stopped = UHCI_IS_STOPPED;
	uhci_to_hcd(uhci)->state = HC_STATE_HALT;
	uhci_to_hcd(uhci)->poll_rh = 0;	/* nothing to poll while halted */
}
124 | ||
/*
 * Last rites for a defunct/nonfunctional controller
 * or one we don't want to use any more.
 * Resets the hardware, records the reset, and marks the controller's
 * registers as off-limits (hc_inaccessible).
 */
static void hc_died(struct uhci_hcd *uhci)
{
	uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr);
	finish_reset(uhci);
	uhci->hc_inaccessible = 1;	/* don't touch the registers again */
}
135 | ||
/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 * uhci_check_and_reset_hc() returns nonzero when it actually had to
 * reset the controller, in which case we must record the reset.
 */
static void check_and_reset_hc(struct uhci_hcd *uhci)
{
	if (uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr))
		finish_reset(uhci);
}
145 | ||
/*
 * Store the basic register settings needed by the controller.
 * Programs SOF timing, the frame list base address, and the current
 * frame number, then enables PIRQ routing.  Does not start the HC.
 */
static void configure_hc(struct uhci_hcd *uhci)
{
	/* Set the frame length to the default: 1 ms exactly */
	outb(USBSOF_DEFAULT, uhci->io_addr + USBSOF);

	/* Store the frame list base address */
	outl(uhci->frame_dma_handle, uhci->io_addr + USBFLBASEADD);

	/* Set the current frame number */
	outw(uhci->frame_number, uhci->io_addr + USBFRNUM);

	/* Mark controller as not halted before we enable interrupts */
	uhci_to_hcd(uhci)->state = HC_STATE_SUSPENDED;
	mb();	/* make sure the state is visible before the PCI write below */

	/* Enable PIRQ */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
			USBLEGSUP_DEFAULT);
}
168 | ||
169 | ||
c8f4fe43 | 170 | static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci) |
1da177e4 | 171 | { |
c8f4fe43 | 172 | int port; |
1da177e4 | 173 | |
c8f4fe43 AS |
174 | switch (to_pci_dev(uhci_dev(uhci))->vendor) { |
175 | default: | |
176 | break; | |
177 | ||
178 | case PCI_VENDOR_ID_GENESYS: | |
179 | /* Genesys Logic's GL880S controllers don't generate | |
180 | * resume-detect interrupts. | |
181 | */ | |
182 | return 1; | |
183 | ||
184 | case PCI_VENDOR_ID_INTEL: | |
185 | /* Some of Intel's USB controllers have a bug that causes | |
186 | * resume-detect interrupts if any port has an over-current | |
187 | * condition. To make matters worse, some motherboards | |
188 | * hardwire unused USB ports' over-current inputs active! | |
189 | * To prevent problems, we will not enable resume-detect | |
190 | * interrupts if any ports are OC. | |
191 | */ | |
192 | for (port = 0; port < uhci->rh_numports; ++port) { | |
193 | if (inw(uhci->io_addr + USBPORTSC1 + port * 2) & | |
194 | USBPORTSC_OC) | |
195 | return 1; | |
196 | } | |
197 | break; | |
198 | } | |
199 | return 0; | |
200 | } | |
201 | ||
/*
 * Put the root hub (and hence the controller) into a suspended state.
 * new_state is either UHCI_RH_SUSPENDED or UHCI_RH_AUTO_STOPPED.
 * Called with uhci->lock held; may temporarily drop it (see the sparse
 * annotations below) while sleeping for the controller to halt.
 */
static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	int auto_stop;
	int int_enable;

	auto_stop = (new_state == UHCI_RH_AUTO_STOPPED);
	dev_dbg(uhci_dev(uhci), "%s%s\n", __FUNCTION__,
			(auto_stop ? " (auto-stop)" : ""));

	/* If we get a suspend request when we're already auto-stopped
	 * then there's nothing to do.
	 */
	if (uhci->rh_state == UHCI_RH_AUTO_STOPPED) {
		uhci->rh_state = new_state;
		return;
	}

	/* Enable resume-detect interrupts if they work.
	 * Then enter Global Suspend mode, still configured.
	 */
	uhci->working_RD = 1;
	int_enable = USBINTR_RESUME;
	if (resume_detect_interrupts_are_broken(uhci)) {
		uhci->working_RD = int_enable = 0;
	}
	outw(int_enable, uhci->io_addr + USBINTR);
	outw(USBCMD_EGSM | USBCMD_CF, uhci->io_addr + USBCMD);
	mb();
	udelay(5);	/* give the HC a moment to react to EGSM */

	/* If we're auto-stopping then no devices have been attached
	 * for a while, so there shouldn't be any active URBs and the
	 * controller should stop after a few microseconds.  Otherwise
	 * we will give the controller one frame to stop.
	 */
	if (!auto_stop && !(inw(uhci->io_addr + USBSTS) & USBSTS_HCH)) {
		uhci->rh_state = UHCI_RH_SUSPENDING;
		spin_unlock_irq(&uhci->lock);
		msleep(1);
		spin_lock_irq(&uhci->lock);
		if (uhci->hc_inaccessible)	/* Died while unlocked */
			return;
	}
	if (!(inw(uhci->io_addr + USBSTS) & USBSTS_HCH))
		dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n");

	/* Record the frame number before the HC goes quiet */
	uhci_get_current_frame_number(uhci);
	smp_wmb();	/* frame_number must be visible before is_stopped */

	uhci->rh_state = new_state;
	uhci->is_stopped = UHCI_IS_STOPPED;
	/* Poll the root hub by timer only if RD interrupts are broken */
	uhci_to_hcd(uhci)->poll_rh = !int_enable;

	uhci_scan_schedule(uhci, NULL);
}
259 | ||
/*
 * Start the root hub: put the controller into the Run state with all
 * interrupt sources enabled, and begin root-hub status polling.
 * Called with uhci->lock held.
 */
static void start_rh(struct uhci_hcd *uhci)
{
	uhci_to_hcd(uhci)->state = HC_STATE_RUNNING;
	uhci->is_stopped = 0;
	smp_wmb();	/* is_stopped must be visible before the HC runs */

	/* Mark it configured and running with a 64-byte max packet.
	 * All interrupts are enabled, even though RESUME won't do anything.
	 */
	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, uhci->io_addr + USBCMD);
	outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
			uhci->io_addr + USBINTR);
	mb();
	uhci->rh_state = UHCI_RH_RUNNING;
	uhci_to_hcd(uhci)->poll_rh = 1;
}
276 | ||
/*
 * Wake up a suspended or auto-stopped root hub and restart the
 * controller.  Called with uhci->lock held; drops it temporarily
 * (see sparse annotations) while waiting out the Global Resume signal.
 */
static void wakeup_rh(struct uhci_hcd *uhci)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	dev_dbg(uhci_dev(uhci), "%s%s\n", __FUNCTION__,
			uhci->rh_state == UHCI_RH_AUTO_STOPPED ?
				" (auto-start)" : "");

	/* If we are auto-stopped then no devices are attached so there's
	 * no need for wakeup signals.  Otherwise we send Global Resume
	 * for 20 ms.
	 */
	if (uhci->rh_state == UHCI_RH_SUSPENDED) {
		uhci->rh_state = UHCI_RH_RESUMING;
		outw(USBCMD_FGR | USBCMD_EGSM | USBCMD_CF,
				uhci->io_addr + USBCMD);
		spin_unlock_irq(&uhci->lock);
		msleep(20);
		spin_lock_irq(&uhci->lock);
		if (uhci->hc_inaccessible)	/* Died while unlocked */
			return;

		/* End Global Resume and wait for EOP to be sent */
		outw(USBCMD_CF, uhci->io_addr + USBCMD);
		mb();
		udelay(4);
		if (inw(uhci->io_addr + USBCMD) & USBCMD_FGR)
			dev_warn(uhci_dev(uhci), "FGR not stopped yet!\n");
	}

	start_rh(uhci);

	/* Restart root hub polling */
	mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
}
312 | ||
/*
 * Interrupt handler.  Acknowledges the controller's status bits,
 * reports fatal conditions (shutting the HC down if it halted
 * unexpectedly), and either kicks root-hub polling (on resume-detect)
 * or scans the schedule for completed transfers.
 */
static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned short status;
	unsigned long flags;

	/*
	 * Read the interrupt status, and write it back to clear the
	 * interrupt cause.  Contrary to the UHCI specification, the
	 * "HC Halted" status bit is persistent: it is RO, not R/WC.
	 */
	status = inw(uhci->io_addr + USBSTS);
	if (!(status & ~USBSTS_HCH))	/* shared interrupt, not mine */
		return IRQ_NONE;
	outw(status, uhci->io_addr + USBSTS);		/* Clear it */

	/* Any bits besides "transfer done / error / resume" are trouble */
	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
		if (status & USBSTS_HSE)
			dev_err(uhci_dev(uhci), "host system error, "
					"PCI problems?\n");
		if (status & USBSTS_HCPE)
			dev_err(uhci_dev(uhci), "host controller process "
					"error, something bad happened!\n");
		if (status & USBSTS_HCH) {
			spin_lock_irqsave(&uhci->lock, flags);
			/* Only fatal if we expected the HC to be running */
			if (uhci->rh_state >= UHCI_RH_RUNNING) {
				dev_err(uhci_dev(uhci),
					"host controller halted, "
					"very bad!\n");
				if (debug > 1 && errbuf) {
					/* Print the schedule for debugging */
					uhci_sprint_schedule(uhci,
							errbuf, ERRBUF_LEN);
					lprintk(errbuf);
				}
				hc_died(uhci);

				/* Force a callback in case there are
				 * pending unlinks */
				mod_timer(&hcd->rh_timer, jiffies);
			}
			spin_unlock_irqrestore(&uhci->lock, flags);
		}
	}

	if (status & USBSTS_RD)
		usb_hcd_poll_rh_status(hcd);
	else {
		spin_lock_irqsave(&uhci->lock, flags);
		uhci_scan_schedule(uhci, regs);
		spin_unlock_irqrestore(&uhci->lock, flags);
	}

	return IRQ_HANDLED;
}
1da177e4 | 368 | |
/*
 * Store the current frame number in uhci->frame_number if the controller
 * is running.  (While stopped, uhci->frame_number already holds the last
 * value latched before the stop.)
 */
static void uhci_get_current_frame_number(struct uhci_hcd *uhci)
{
	if (!uhci->is_stopped)
		uhci->frame_number = inw(uhci->io_addr + USBFRNUM);
}
378 | ||
/*
 * De-allocate all resources.
 * Reverse of the allocations made in uhci_start(); the teardown order
 * matches the error-exit path there (QHs, terminating TD, pools,
 * frame-pointer array, frame list).
 */
static void release_uhci(struct uhci_hcd *uhci)
{
	int i;

	if (DEBUG_CONFIGURED) {
		/* Keep the debugfs reader from seeing a half-torn-down HC */
		spin_lock_irq(&uhci->lock);
		uhci->is_initialized = 0;
		spin_unlock_irq(&uhci->lock);

		debugfs_remove(uhci->dentry);
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		uhci_free_qh(uhci, uhci->skelqh[i]);

	uhci_free_td(uhci, uhci->term_td);

	dma_pool_destroy(uhci->qh_pool);

	dma_pool_destroy(uhci->td_pool);

	kfree(uhci->frame_cpu);

	dma_free_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			uhci->frame, uhci->frame_dma_handle);
}
409 | ||
/*
 * hc_driver ->reset entry point: probe the number of root-hub ports
 * and make sure the controller is quiescent before ->start runs.
 * Always returns 0.
 */
static int uhci_reset(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned io_size = (unsigned) hcd->rsrc_len;
	int port;

	uhci->io_addr = (unsigned long) hcd->rsrc_start;

	/* The UHCI spec says devices must have 2 ports, and goes on to say
	 * they may have more but gives no way to determine how many there
	 * are.  However according to the UHCI spec, Bit 7 of the port
	 * status and control register is always set to 1.  So we try to
	 * use this to our advantage.  Another common failure mode when
	 * a nonexistent register is addressed is to return all ones, so
	 * we test for that also.
	 */
	for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) {
		unsigned int portstatus;

		portstatus = inw(uhci->io_addr + USBPORTSC1 + (port * 2));
		if (!(portstatus & 0x0080) || portstatus == 0xffff)
			break;
	}
	if (debug)
		dev_info(uhci_dev(uhci), "detected %d ports\n", port);

	/* Anything greater than 7 is weird so we'll ignore it. */
	if (port > UHCI_RH_MAXCHILD) {
		dev_info(uhci_dev(uhci), "port count misdetected? "
				"forcing to 2 ports\n");
		port = 2;
	}
	uhci->rh_numports = port;

	/* Kick BIOS off this hardware and reset if the controller
	 * isn't already safely quiescent.
	 */
	check_and_reset_hc(uhci);
	return 0;
}
450 | ||
/* Make sure the controller is quiescent and that we're not using it
 * any more.  This is mainly for the benefit of programs which, like kexec,
 * expect the hardware to be idle: not doing DMA or generating IRQs.
 *
 * This routine may be called in a damaged or failing kernel.  Hence we
 * do not acquire the spinlock before shutting down the controller.
 */
static void uhci_shutdown(struct pci_dev *pdev)
{
	struct usb_hcd *hcd = (struct usb_hcd *) pci_get_drvdata(pdev);

	hc_died(hcd_to_uhci(hcd));
}
464 | ||
/*
 * Allocate a frame list, and then setup the skeleton
 *
 * The hardware doesn't really know any difference
 * in the queues, but the order does matter for the
 * protocols higher up.  The order is:
 *
 *  - any isochronous events handled before any
 *    of the queues.  We don't do that here, because
 *    we'll create the actual TD entries on demand.
 *  - The first queue is the interrupt queue.
 *  - The second queue is the control queue, split into low- and full-speed
 *  - The third queue is bulk queue.
 *  - The fourth queue is the bandwidth reclamation queue, which loops back
 *    to the full-speed control queue.
 *
 * Returns 0 on success or a negative errno; on failure every resource
 * allocated so far is unwound via the goto chain at the bottom.
 */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int retval = -EBUSY;
	int i;
	struct dentry *dentry;

	hcd->uses_new_polling = 1;

	uhci->fsbr = 0;
	uhci->fsbrtimeout = 0;

	spin_lock_init(&uhci->lock);

	INIT_LIST_HEAD(&uhci->idle_qh_list);

	init_waitqueue_head(&uhci->waitqh);

	/* Optional debugfs file showing the schedule (compiled-in only
	 * when DEBUG is configured) */
	if (DEBUG_CONFIGURED) {
		dentry = debugfs_create_file(hcd->self.bus_name,
				S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root,
				uhci, &uhci_debug_operations);
		if (!dentry) {
			dev_err(uhci_dev(uhci), "couldn't create uhci "
					"debugfs entry\n");
			retval = -ENOMEM;
			goto err_create_debug_entry;
		}
		uhci->dentry = dentry;
	}

	/* The hardware-visible frame list: 1024 little-endian pointers */
	uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			&uhci->frame_dma_handle, 0);
	if (!uhci->frame) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"consistent memory for frame list\n");
		goto err_alloc_frame;
	}
	memset(uhci->frame, 0, UHCI_NUMFRAMES * sizeof(*uhci->frame));

	/* CPU-side shadow of the frame list, one pointer per frame */
	uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
			GFP_KERNEL);
	if (!uhci->frame_cpu) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"memory for frame pointers\n");
		goto err_alloc_frame_cpu;
	}

	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
			sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
		goto err_create_td_pool;
	}

	uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
			sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
		goto err_create_qh_pool;
	}

	uhci->term_td = uhci_alloc_td(uhci);
	if (!uhci->term_td) {
		dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci, NULL, NULL);
		if (!uhci->skelqh[i]) {
			dev_err(uhci_dev(uhci), "unable to allocate QH\n");
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 Interrupt queues; link all higher int queues to int1,
	 * then link int1 to control and control to bulk
	 */
	uhci->skel_int128_qh->link =
			uhci->skel_int64_qh->link =
			uhci->skel_int32_qh->link =
			uhci->skel_int16_qh->link =
			uhci->skel_int8_qh->link =
			uhci->skel_int4_qh->link =
			uhci->skel_int2_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_int1_qh->dma_handle);

	uhci->skel_int1_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_ls_control_qh->dma_handle);
	uhci->skel_ls_control_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_fs_control_qh->dma_handle);
	uhci->skel_fs_control_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_bulk_qh->dma_handle);
	uhci->skel_bulk_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_term_qh->dma_handle);

	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci->term_td, 0, uhci_explen(0) |
		(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);

	uhci->skel_term_qh->link = UHCI_PTR_TERM;
	uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

	/*
	 * Fill the frame list: make all entries point to the proper
	 * interrupt queue.
	 *
	 * The interrupt queues will be interleaved as evenly as possible.
	 * There's not much to be done about period-1 interrupts; they have
	 * to occur in every frame.  But we can schedule period-2 interrupts
	 * in odd-numbered frames, period-4 interrupts in frames congruent
	 * to 2 (mod 4), and so on.  This way each frame only has two
	 * interrupt QHs, which will help spread out bandwidth utilization.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {
		int irq;

		/*
		 * ffs (Find First bit Set) does exactly what we need:
		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[8],
		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc.
		 * ffs >= 7 => not on any high-period queue, so use
		 *	skel_int1_qh = skelqh[9].
		 * Add UHCI_NUMFRAMES to insure at least one bit is set.
		 */
		irq = 8 - (int) __ffs(i + UHCI_NUMFRAMES);
		if (irq <= 1)
			irq = 9;

		/* Only place we don't use the frame list routines */
		uhci->frame[i] = UHCI_PTR_QH |
				cpu_to_le32(uhci->skelqh[irq]->dma_handle);
	}

	/*
	 * Some architectures require a full mb() to enforce completion of
	 * the memory writes above before the I/O transfers in configure_hc().
	 */
	mb();

	configure_hc(uhci);
	uhci->is_initialized = 1;
	start_rh(uhci);
	return 0;

/*
 * error exits:
 */
err_alloc_skelqh:
	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		if (uhci->skelqh[i])
			uhci_free_qh(uhci, uhci->skelqh[i]);
	}

	uhci_free_td(uhci, uhci->term_td);

err_alloc_term_td:
	dma_pool_destroy(uhci->qh_pool);

err_create_qh_pool:
	dma_pool_destroy(uhci->td_pool);

err_create_td_pool:
	kfree(uhci->frame_cpu);

err_alloc_frame_cpu:
	dma_free_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			uhci->frame, uhci->frame_dma_handle);

err_alloc_frame:
	debugfs_remove(uhci->dentry);

err_create_debug_entry:
	return retval;
}
661 | ||
/*
 * hc_driver ->stop entry point: shut the controller down (unless it is
 * already dead), flush the schedule one last time, and free everything.
 */
static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	spin_lock_irq(&uhci->lock);
	if (!uhci->hc_inaccessible)
		hc_died(uhci);
	uhci_scan_schedule(uhci, NULL);	/* complete any remaining URBs */
	spin_unlock_irq(&uhci->lock);

	release_uhci(uhci);
}
674 | ||
675 | #ifdef CONFIG_PM | |
/*
 * hc_driver ->bus_suspend entry point: suspend the root hub unless the
 * controller has already died.  Always returns 0.
 */
static int uhci_rh_suspend(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	spin_lock_irq(&uhci->lock);
	if (!uhci->hc_inaccessible)		/* Not dead */
		suspend_rh(uhci, UHCI_RH_SUSPENDED);
	spin_unlock_irq(&uhci->lock);
	return 0;
}
686 | ||
/*
 * hc_driver ->bus_resume entry point: wake the root hub back up.
 * Returns -ENODEV if the controller's registers are inaccessible
 * while the root hub is merely suspended (it should be running);
 * returns 0 otherwise, including when the HC is outright dead.
 */
static int uhci_rh_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc = 0;

	spin_lock_irq(&uhci->lock);
	if (uhci->hc_inaccessible) {
		if (uhci->rh_state == UHCI_RH_SUSPENDED) {
			dev_warn(uhci_dev(uhci), "HC isn't running!\n");
			rc = -ENODEV;
		}
		/* Otherwise the HC is dead */
	} else
		wakeup_rh(uhci);
	spin_unlock_irq(&uhci->lock);
	return rc;
}
704 | ||
9a5d3e98 | 705 | static int uhci_suspend(struct usb_hcd *hcd, pm_message_t message) |
1da177e4 LT |
706 | { |
707 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | |
4daaa87c | 708 | int rc = 0; |
1da177e4 | 709 | |
a8bed8b6 AS |
710 | dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__); |
711 | ||
1da177e4 | 712 | spin_lock_irq(&uhci->lock); |
4daaa87c AS |
713 | if (uhci->hc_inaccessible) /* Dead or already suspended */ |
714 | goto done; | |
a8bed8b6 | 715 | |
4daaa87c AS |
716 | if (uhci->rh_state > UHCI_RH_SUSPENDED) { |
717 | dev_warn(uhci_dev(uhci), "Root hub isn't suspended!\n"); | |
4daaa87c AS |
718 | rc = -EBUSY; |
719 | goto done; | |
720 | }; | |
721 | ||
a8bed8b6 AS |
722 | /* All PCI host controllers are required to disable IRQ generation |
723 | * at the source, so we must turn off PIRQ. | |
724 | */ | |
725 | pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0); | |
42245e65 | 726 | mb(); |
8de98402 | 727 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); |
a8bed8b6 | 728 | uhci->hc_inaccessible = 1; |
1f09df8b | 729 | hcd->poll_rh = 0; |
a8bed8b6 AS |
730 | |
731 | /* FIXME: Enable non-PME# remote wakeup? */ | |
732 | ||
4daaa87c | 733 | done: |
1da177e4 | 734 | spin_unlock_irq(&uhci->lock); |
4daaa87c | 735 | return rc; |
1da177e4 LT |
736 | } |
737 | ||
/*
 * hc_driver ->resume entry point (PCI/system resume).
 * Re-enables register access, undoes whatever the BIOS did during the
 * system sleep, and restarts root-hub polling if resume-detect
 * interrupts don't work on this controller.  Always returns 0.
 */
static int uhci_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);

	/* Since we aren't in D3 any more, it's safe to set this flag
	 * even if the controller was dead.  It might not even be dead
	 * any more, if the firmware or quirks code has reset it.
	 */
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	mb();

	if (uhci->rh_state == UHCI_RH_RESET)	/* Dead */
		return 0;
	spin_lock_irq(&uhci->lock);

	/* FIXME: Disable non-PME# remote wakeup? */

	uhci->hc_inaccessible = 0;

	/* The BIOS may have changed the controller settings during a
	 * system wakeup.  Check it and reconfigure to avoid problems.
	 */
	check_and_reset_hc(uhci);
	configure_hc(uhci);

	if (uhci->rh_state == UHCI_RH_RESET) {

		/* The controller had to be reset */
		usb_root_hub_lost_power(hcd->self.root_hub);
		suspend_rh(uhci, UHCI_RH_SUSPENDED);
	}

	spin_unlock_irq(&uhci->lock);

	if (!uhci->working_RD) {
		/* Suspended root hub needs to be polled */
		hcd->poll_rh = 1;
		usb_hcd_poll_rh_status(hcd);
	}
	return 0;
}
781 | #endif | |
782 | ||
/* Wait until a particular device/endpoint's QH is idle, and free it */
static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct uhci_qh *qh;

	spin_lock_irq(&uhci->lock);
	qh = (struct uhci_qh *) hep->hcpriv;
	if (qh == NULL)		/* endpoint never had a QH; nothing to do */
		goto done;

	/* Sleep (lock dropped) until the QH has drained and gone idle */
	while (qh->state != QH_STATE_IDLE) {
		++uhci->num_waiting;
		spin_unlock_irq(&uhci->lock);
		wait_event_interruptible(uhci->waitqh,
				qh->state == QH_STATE_IDLE);
		spin_lock_irq(&uhci->lock);
		--uhci->num_waiting;
	}

	uhci_free_qh(uhci, qh);
done:
	spin_unlock_irq(&uhci->lock);
}
808 | ||
/*
 * hc_driver ->get_frame_number entry point: return the current USB
 * frame number, reading it from the hardware when the controller is
 * running or from the saved copy when it is stopped.
 */
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	int is_stopped;
	int frame_number;

	/* Minimize latency by avoiding the spinlock */
	local_irq_save(flags);
	is_stopped = uhci->is_stopped;
	smp_rmb();	/* read is_stopped before frame_number (pairs with
			 * the smp_wmb() in suspend_rh()) */
	frame_number = (is_stopped ? uhci->frame_number :
			inw(uhci->io_addr + USBFRNUM));
	local_irq_restore(flags);
	return frame_number;
}
825 | ||
/* Driver name, shared by the hc_driver description and the PCI driver */
static const char hcd_name[] = "uhci_hcd";

/* Operations table registered with usbcore for UHCI host controllers */
static const struct hc_driver uhci_driver = {
	.description =		hcd_name,
	.product_desc =		"UHCI Host Controller",
	.hcd_priv_size =	sizeof(struct uhci_hcd),

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_USB11,	/* UHCI is USB 1.1 only */

	/* Basic lifecycle operations */
	.reset =		uhci_reset,
	.start =		uhci_start,
#ifdef CONFIG_PM
	/* Controller (PCI-level) and root-hub (bus-level) PM hooks */
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
	.bus_suspend =		uhci_rh_suspend,
	.bus_resume =		uhci_rh_resume,
#endif
	.stop =			uhci_stop,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.endpoint_disable =	uhci_hcd_endpoint_disable,
	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};
857 | ||
/* Match any PCI device whose class is serial-bus/USB/UHCI, regardless
 * of vendor or device ID. */
static const struct pci_device_id uhci_pci_ids[] = { {
	/* handle any USB UHCI controller */
	PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
	.driver_data = (unsigned long) &uhci_driver,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
866 | ||
/* PCI glue: probe/remove/shutdown and PM are delegated to the generic
 * usb_hcd_pci_* helpers, which dispatch into uhci_driver above. */
static struct pci_driver uhci_pci_driver = {
	.name =		(char *)hcd_name,	/* cast: pci_driver.name is non-const here */
	.id_table =	uhci_pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,
	.shutdown =	uhci_shutdown,

#ifdef CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif	/* PM */
};
880 | ||
/* Module init: set up debugging aids, the urb_priv slab cache, and
 * register the PCI driver.  Unwinds in reverse order via gotos on
 * failure.  Returns 0 on success or a negative errno. */
static int __init uhci_hcd_init(void)
{
	int retval = -ENOMEM;

	printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");

	if (usb_disabled())
		return -ENODEV;

	if (DEBUG_CONFIGURED) {
		/* Debug builds only: scratch buffer for dumping schedules
		 * plus a debugfs directory for per-controller files. */
		errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
		if (!errbuf)
			goto errbuf_failed;
		uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
		if (!uhci_debugfs_root)
			goto debug_failed;
	}

	/* Slab cache for the per-URB private bookkeeping structures */
	uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
		sizeof(struct urb_priv), 0, 0, NULL, NULL);
	if (!uhci_up_cachep)
		goto up_failed;

	retval = pci_register_driver(&uhci_pci_driver);
	if (retval)
		goto init_failed;

	return 0;

	/* Error unwind: each label undoes the steps completed before the
	 * failure.  debugfs_remove(NULL) and kfree(NULL) are no-ops, so
	 * the non-debug path falls through these safely. */
init_failed:
	if (kmem_cache_destroy(uhci_up_cachep))
		warn("not all urb_privs were freed!");

up_failed:
	debugfs_remove(uhci_debugfs_root);

debug_failed:
	kfree(errbuf);

errbuf_failed:

	return retval;
}
924 | ||
/* Module exit: tear down everything uhci_hcd_init() created, in reverse
 * order of creation. */
static void __exit uhci_hcd_cleanup(void)
{
	pci_unregister_driver(&uhci_pci_driver);

	/* A non-zero return means some urb_priv objects were still live */
	if (kmem_cache_destroy(uhci_up_cachep))
		warn("not all urb_privs were freed!");

	debugfs_remove(uhci_debugfs_root);
	kfree(errbuf);		/* kfree(NULL) is a no-op on non-debug builds */
}
935 | ||
/* Standard module entry/exit hookup and metadata */
module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");