drivers/usb/host/pci-quirks.c
1 /*
2 * This file contains code to reset and initialize USB host controllers.
3 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
4 * It may need to run early during booting -- before USB would normally
5 * initialize -- to ensure that Linux doesn't use any legacy modes.
6 *
7 * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
8 * (and others)
9 */
10
11 #include <linux/types.h>
12 #include <linux/kconfig.h>
13 #include <linux/kernel.h>
14 #include <linux/pci.h>
15 #include <linux/delay.h>
16 #include <linux/export.h>
17 #include <linux/acpi.h>
18 #include <linux/dmi.h>
19 #include "pci-quirks.h"
20 #include "xhci-ext-caps.h"
21
22
23 #define UHCI_USBLEGSUP 0xc0 /* legacy support */
24 #define UHCI_USBCMD 0 /* command register */
25 #define UHCI_USBINTR 4 /* interrupt register */
26 #define UHCI_USBLEGSUP_RWC 0x8f00 /* the R/WC bits */
27 #define UHCI_USBLEGSUP_RO 0x5040 /* R/O and reserved bits */
28 #define UHCI_USBCMD_RUN 0x0001 /* RUN/STOP bit */
29 #define UHCI_USBCMD_HCRESET 0x0002 /* Host Controller reset */
30 #define UHCI_USBCMD_EGSM 0x0008 /* Global Suspend Mode */
31 #define UHCI_USBCMD_CONFIGURE 0x0040 /* Config Flag */
32 #define UHCI_USBINTR_RESUME 0x0002 /* Resume interrupt enable */
33
34 #define OHCI_CONTROL 0x04
35 #define OHCI_CMDSTATUS 0x08
36 #define OHCI_INTRSTATUS 0x0c
37 #define OHCI_INTRENABLE 0x10
38 #define OHCI_INTRDISABLE 0x14
39 #define OHCI_FMINTERVAL 0x34
40 #define OHCI_HCFS (3 << 6) /* hc functional state */
41 #define OHCI_HCR (1 << 0) /* host controller reset */
42 #define OHCI_OCR (1 << 3) /* ownership change request */
43 #define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
44 #define OHCI_CTRL_IR (1 << 8) /* interrupt routing */
45 #define OHCI_INTR_OC (1 << 30) /* ownership change */
46
47 #define EHCI_HCC_PARAMS 0x08 /* extended capabilities */
48 #define EHCI_USBCMD 0 /* command register */
49 #define EHCI_USBCMD_RUN (1 << 0) /* RUN/STOP bit */
50 #define EHCI_USBSTS 4 /* status register */
51 #define EHCI_USBSTS_HALTED (1 << 12) /* HCHalted bit */
52 #define EHCI_USBINTR 8 /* interrupt register */
53 #define EHCI_CONFIGFLAG 0x40 /* configured flag register */
54 #define EHCI_USBLEGSUP 0 /* legacy support register */
55 #define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */
56 #define EHCI_USBLEGSUP_OS (1 << 24) /* OS semaphore */
57 #define EHCI_USBLEGCTLSTS 4 /* legacy control/status */
58 #define EHCI_USBLEGCTLSTS_SOOE (1 << 13) /* SMI on ownership change */
59
60 /* AMD quirk use */
61 #define AB_REG_BAR_LOW 0xe0
62 #define AB_REG_BAR_HIGH 0xe1
63 #define AB_REG_BAR_SB700 0xf0
64 #define AB_INDX(addr) ((addr) + 0x00)
65 #define AB_DATA(addr) ((addr) + 0x04)
66 #define AX_INDXC 0x30
67 #define AX_DATAC 0x34
68
69 #define NB_PCIE_INDX_ADDR 0xe0
70 #define NB_PCIE_INDX_DATA 0xe4
71 #define PCIE_P_CNTL 0x10040
72 #define BIF_NB 0x10002
73 #define NB_PIF0_PWRDOWN_0 0x01100012
74 #define NB_PIF0_PWRDOWN_1 0x01100013
75
76 #define USB_INTEL_XUSB2PR 0xD0
77 #define USB_INTEL_USB2PRM 0xD4
78 #define USB_INTEL_USB3_PSSEN 0xD8
79 #define USB_INTEL_USB3PRM 0xDC
80
81 /*
82 * amd_chipset_gen values represent the different AMD chipset generations
83 */
84 enum amd_chipset_gen {
85 NOT_AMD_CHIPSET = 0,
86 AMD_CHIPSET_SB600,
87 AMD_CHIPSET_SB700,
88 AMD_CHIPSET_SB800,
89 AMD_CHIPSET_HUDSON2,
90 AMD_CHIPSET_BOLTON,
91 AMD_CHIPSET_YANGTZE,
92 AMD_CHIPSET_UNKNOWN,
93 };
94
95 struct amd_chipset_type {
96 enum amd_chipset_gen gen;
97 u8 rev;
98 };
99
100 static struct amd_chipset_info {
101 struct pci_dev *nb_dev;
102 struct pci_dev *smbus_dev;
103 int nb_type;
104 struct amd_chipset_type sb_type;
105 int isoc_reqs;
106 int probe_count;
107 int probe_result;
108 } amd_chipset;
109
110 static DEFINE_SPINLOCK(amd_lock);
111
112 /*
113 * amd_chipset_sb_type_init - initialize amd chipset southbridge type
114 *
115 * The AMD FCH/SB generation and revision are identified by the SMBus
116 * controller's vendor, device and revision IDs.
117 *
118 * Returns: 1 if it is an AMD chipset, 0 otherwise.
119 */
120 static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
121 {
122 u8 rev = 0;
123 pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
124
125 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
126 PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
127 if (pinfo->smbus_dev) {
128 rev = pinfo->smbus_dev->revision;
129 if (rev >= 0x10 && rev <= 0x1f)
130 pinfo->sb_type.gen = AMD_CHIPSET_SB600;
131 else if (rev >= 0x30 && rev <= 0x3f)
132 pinfo->sb_type.gen = AMD_CHIPSET_SB700;
133 else if (rev >= 0x40 && rev <= 0x4f)
134 pinfo->sb_type.gen = AMD_CHIPSET_SB800;
135 } else {
136 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
137 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
138
139 if (!pinfo->smbus_dev) {
140 pinfo->sb_type.gen = NOT_AMD_CHIPSET;
141 return 0;
142 }
143
144 rev = pinfo->smbus_dev->revision;
145 if (rev >= 0x11 && rev <= 0x14)
146 pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
147 else if (rev >= 0x15 && rev <= 0x18)
148 pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
149 else if (rev >= 0x39 && rev <= 0x3a)
150 pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
151 }
152
153 pinfo->sb_type.rev = rev;
154 return 1;
155 }
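/*
 * Illustrative sketch (not part of the original file): the revision-range
 * checks in amd_chipset_sb_type_init() above could equally be written as a
 * table walk.  The ranges are copied verbatim from the function; the table
 * and helper names are hypothetical.
 */
#if 0
struct sb_rev_range {
	u8 lo, hi;
	enum amd_chipset_gen gen;
};

static const struct sb_rev_range ati_sb_ranges[] = {
	{ 0x10, 0x1f, AMD_CHIPSET_SB600 },
	{ 0x30, 0x3f, AMD_CHIPSET_SB700 },
	{ 0x40, 0x4f, AMD_CHIPSET_SB800 },
};

static enum amd_chipset_gen lookup_ati_sb_gen(u8 rev)
{
	int i;

	/* map an ATI SBx00 SMBus revision to its chipset generation */
	for (i = 0; i < ARRAY_SIZE(ati_sb_ranges); i++)
		if (rev >= ati_sb_ranges[i].lo && rev <= ati_sb_ranges[i].hi)
			return ati_sb_ranges[i].gen;
	return AMD_CHIPSET_UNKNOWN;
}
#endif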
156
157 void sb800_prefetch(struct device *dev, int on)
158 {
159 u16 misc;
160 struct pci_dev *pdev = to_pci_dev(dev);
161
162 pci_read_config_word(pdev, 0x50, &misc);
163 if (on == 0)
164 pci_write_config_word(pdev, 0x50, misc & 0xfcff);
165 else
166 pci_write_config_word(pdev, 0x50, misc | 0x0300);
167 }
168 EXPORT_SYMBOL_GPL(sb800_prefetch);
169
170 int usb_amd_find_chipset_info(void)
171 {
172 unsigned long flags;
173 struct amd_chipset_info info;
174 int ret;
175
176 spin_lock_irqsave(&amd_lock, flags);
177
178 /* probe only once */
179 if (amd_chipset.probe_count > 0) {
180 amd_chipset.probe_count++;
181 spin_unlock_irqrestore(&amd_lock, flags);
182 return amd_chipset.probe_result;
183 }
184 memset(&info, 0, sizeof(info));
185 spin_unlock_irqrestore(&amd_lock, flags);
186
187 if (!amd_chipset_sb_type_init(&info)) {
188 ret = 0;
189 goto commit;
190 }
191
192 /* The chipset generations below don't need the AMD PLL quirk */
193 if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
194 info.sb_type.gen == AMD_CHIPSET_SB600 ||
195 info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
196 (info.sb_type.gen == AMD_CHIPSET_SB700 &&
197 info.sb_type.rev > 0x3b)) {
198 if (info.smbus_dev) {
199 pci_dev_put(info.smbus_dev);
200 info.smbus_dev = NULL;
201 }
202 ret = 0;
203 goto commit;
204 }
205
206 info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
207 if (info.nb_dev) {
208 info.nb_type = 1;
209 } else {
210 info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
211 if (info.nb_dev) {
212 info.nb_type = 2;
213 } else {
214 info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
215 0x9600, NULL);
216 if (info.nb_dev)
217 info.nb_type = 3;
218 }
219 }
220
221 ret = info.probe_result = 1;
222 printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
223
224 commit:
225
226 spin_lock_irqsave(&amd_lock, flags);
227 if (amd_chipset.probe_count > 0) {
228 /* race - someone else was faster - drop devices */
229
230 /* Mark that we were here */
231 amd_chipset.probe_count++;
232 ret = amd_chipset.probe_result;
233
234 spin_unlock_irqrestore(&amd_lock, flags);
235
236 if (info.nb_dev)
237 pci_dev_put(info.nb_dev);
238 if (info.smbus_dev)
239 pci_dev_put(info.smbus_dev);
240
241 } else {
242 /* no race - commit the result */
243 info.probe_count++;
244 amd_chipset = info;
245 spin_unlock_irqrestore(&amd_lock, flags);
246 }
247
248 return ret;
249 }
250 EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
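/*
 * Illustrative usage sketch (not part of the original file): host controller
 * drivers are expected to call usb_amd_find_chipset_info() once at probe
 * time and balance it with usb_amd_dev_put() on removal, so that the
 * refcounted nb_dev/smbus_dev pointers cached in amd_chipset are released.
 * The probe and remove helpers below are hypothetical.
 */
#if 0
static int example_hcd_pci_probe(struct pci_dev *pdev)
{
	if (usb_amd_find_chipset_info())
		dev_info(&pdev->dev, "AMD PLL quirk is active\n");
	return 0;
}

static void example_hcd_pci_remove(struct pci_dev *pdev)
{
	usb_amd_dev_put();	/* drops the cached nb_dev/smbus_dev references */
}
#endif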
251
252 int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
253 {
254 /* Make sure amd chipset type has already been initialized */
255 usb_amd_find_chipset_info();
256 if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
257 return 0;
258
259 dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
260 return 1;
261 }
262 EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
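/*
 * Illustrative sketch (not part of the original file): a PCI HCD glue layer
 * could consult this quirk when deciding whether to apply a remote-wakeup
 * workaround.  The function below is hypothetical.
 */
#if 0
static void example_hcd_pci_suspend(struct pci_dev *pdev)
{
	if (usb_hcd_amd_remote_wakeup_quirk(pdev)) {
		/* apply the Yangtze (FCH) remote wakeup workaround here */
	}
}
#endif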
263
264 bool usb_amd_hang_symptom_quirk(void)
265 {
266 u8 rev;
267
268 usb_amd_find_chipset_info();
269 rev = amd_chipset.sb_type.rev;
270 /* SB600 and old version of SB700 have hang symptom bug */
271 return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
272 (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
273 rev >= 0x3a && rev <= 0x3b);
274 }
275 EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
276
277 bool usb_amd_prefetch_quirk(void)
278 {
279 usb_amd_find_chipset_info();
280 /* SB800 needs pre-fetch fix */
281 return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
282 }
283 EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
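/*
 * Illustrative usage sketch (not part of the original file): a host
 * controller driver affected by the SB800 prefetch erratum is expected to
 * turn prefetching off around the sensitive operation and back on again
 * afterwards.  The surrounding function is hypothetical.
 */
#if 0
static void example_queue_transfer(struct device *controller_dev)
{
	if (usb_amd_prefetch_quirk())
		sb800_prefetch(controller_dev, 0);	/* prefetch off */

	/* ... queue the transfer descriptors ... */

	if (usb_amd_prefetch_quirk())
		sb800_prefetch(controller_dev, 1);	/* prefetch back on */
}
#endif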
284
285 /*
286 * The hardware normally enables the A-link power management feature, which
287 * lets the system lower the power consumption in idle states.
288 *
289 * This USB quirk prevents the link going into that lower power state
290 * during isochronous transfers.
291 *
292 * Without this quirk, isochronous streams on the OHCI/EHCI/xHCI controllers
293 * of some AMD platforms may stutter or break up occasionally.
294 */
295 static void usb_amd_quirk_pll(int disable)
296 {
297 u32 addr, addr_low, addr_high, val;
298 u32 bit = disable ? 0 : 1;
299 unsigned long flags;
300
301 spin_lock_irqsave(&amd_lock, flags);
302
303 if (disable) {
304 amd_chipset.isoc_reqs++;
305 if (amd_chipset.isoc_reqs > 1) {
306 spin_unlock_irqrestore(&amd_lock, flags);
307 return;
308 }
309 } else {
310 amd_chipset.isoc_reqs--;
311 if (amd_chipset.isoc_reqs > 0) {
312 spin_unlock_irqrestore(&amd_lock, flags);
313 return;
314 }
315 }
316
317 if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
318 amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
319 amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
320 outb_p(AB_REG_BAR_LOW, 0xcd6);
321 addr_low = inb_p(0xcd7);
322 outb_p(AB_REG_BAR_HIGH, 0xcd6);
323 addr_high = inb_p(0xcd7);
324 addr = addr_high << 8 | addr_low;
325
326 outl_p(0x30, AB_INDX(addr));
327 outl_p(0x40, AB_DATA(addr));
328 outl_p(0x34, AB_INDX(addr));
329 val = inl_p(AB_DATA(addr));
330 } else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
331 amd_chipset.sb_type.rev <= 0x3b) {
332 pci_read_config_dword(amd_chipset.smbus_dev,
333 AB_REG_BAR_SB700, &addr);
334 outl(AX_INDXC, AB_INDX(addr));
335 outl(0x40, AB_DATA(addr));
336 outl(AX_DATAC, AB_INDX(addr));
337 val = inl(AB_DATA(addr));
338 } else {
339 spin_unlock_irqrestore(&amd_lock, flags);
340 return;
341 }
342
343 if (disable) {
344 val &= ~0x08;
345 val |= (1 << 4) | (1 << 9);
346 } else {
347 val |= 0x08;
348 val &= ~((1 << 4) | (1 << 9));
349 }
350 outl_p(val, AB_DATA(addr));
351
352 if (!amd_chipset.nb_dev) {
353 spin_unlock_irqrestore(&amd_lock, flags);
354 return;
355 }
356
357 if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
358 addr = PCIE_P_CNTL;
359 pci_write_config_dword(amd_chipset.nb_dev,
360 NB_PCIE_INDX_ADDR, addr);
361 pci_read_config_dword(amd_chipset.nb_dev,
362 NB_PCIE_INDX_DATA, &val);
363
364 val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
365 val |= bit | (bit << 3) | (bit << 12);
366 val |= ((!bit) << 4) | ((!bit) << 9);
367 pci_write_config_dword(amd_chipset.nb_dev,
368 NB_PCIE_INDX_DATA, val);
369
370 addr = BIF_NB;
371 pci_write_config_dword(amd_chipset.nb_dev,
372 NB_PCIE_INDX_ADDR, addr);
373 pci_read_config_dword(amd_chipset.nb_dev,
374 NB_PCIE_INDX_DATA, &val);
375 val &= ~(1 << 8);
376 val |= bit << 8;
377
378 pci_write_config_dword(amd_chipset.nb_dev,
379 NB_PCIE_INDX_DATA, val);
380 } else if (amd_chipset.nb_type == 2) {
381 addr = NB_PIF0_PWRDOWN_0;
382 pci_write_config_dword(amd_chipset.nb_dev,
383 NB_PCIE_INDX_ADDR, addr);
384 pci_read_config_dword(amd_chipset.nb_dev,
385 NB_PCIE_INDX_DATA, &val);
386 if (disable)
387 val &= ~(0x3f << 7);
388 else
389 val |= 0x3f << 7;
390
391 pci_write_config_dword(amd_chipset.nb_dev,
392 NB_PCIE_INDX_DATA, val);
393
394 addr = NB_PIF0_PWRDOWN_1;
395 pci_write_config_dword(amd_chipset.nb_dev,
396 NB_PCIE_INDX_ADDR, addr);
397 pci_read_config_dword(amd_chipset.nb_dev,
398 NB_PCIE_INDX_DATA, &val);
399 if (disable)
400 val &= ~(0x3f << 7);
401 else
402 val |= 0x3f << 7;
403
404 pci_write_config_dword(amd_chipset.nb_dev,
405 NB_PCIE_INDX_DATA, val);
406 }
407
408 spin_unlock_irqrestore(&amd_lock, flags);
409 return;
410 }
411
412 void usb_amd_quirk_pll_disable(void)
413 {
414 usb_amd_quirk_pll(1);
415 }
416 EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
417
418 void usb_amd_quirk_pll_enable(void)
419 {
420 usb_amd_quirk_pll(0);
421 }
422 EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
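/*
 * Illustrative usage sketch (not part of the original file): the PLL quirk
 * is reference counted through isoc_reqs, so disable/enable calls must be
 * strictly paired around the lifetime of an isochronous stream.  The helpers
 * below, and the amd_pll_fix flag a driver would cache from
 * usb_amd_find_chipset_info() at probe time, are hypothetical.
 */
#if 0
static void example_start_iso_stream(bool amd_pll_fix)
{
	if (amd_pll_fix)
		usb_amd_quirk_pll_disable();	/* keep the A-link out of low power */
}

static void example_stop_iso_stream(bool amd_pll_fix)
{
	if (amd_pll_fix)
		usb_amd_quirk_pll_enable();	/* allow low-power states again */
}
#endif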
423
424 void usb_amd_dev_put(void)
425 {
426 struct pci_dev *nb, *smbus;
427 unsigned long flags;
428
429 spin_lock_irqsave(&amd_lock, flags);
430
431 amd_chipset.probe_count--;
432 if (amd_chipset.probe_count > 0) {
433 spin_unlock_irqrestore(&amd_lock, flags);
434 return;
435 }
436
437 /* save them so we can pci_dev_put() outside of the spinlock */
438 nb = amd_chipset.nb_dev;
439 smbus = amd_chipset.smbus_dev;
440
441 amd_chipset.nb_dev = NULL;
442 amd_chipset.smbus_dev = NULL;
443 amd_chipset.nb_type = 0;
444 memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
445 amd_chipset.isoc_reqs = 0;
446 amd_chipset.probe_result = 0;
447
448 spin_unlock_irqrestore(&amd_lock, flags);
449
450 if (nb)
451 pci_dev_put(nb);
452 if (smbus)
453 pci_dev_put(smbus);
454 }
455 EXPORT_SYMBOL_GPL(usb_amd_dev_put);
456
457 /*
458 * Make sure the controller is completely inactive, unable to
459 * generate interrupts or do DMA.
460 */
461 void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
462 {
463 /* Turn off PIRQ enable and SMI enable. (This also turns off the
464 * BIOS's USB Legacy Support.) Turn off all the R/WC bits too.
465 */
466 pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
467
468 /* Reset the HC - this will force us to get a
469 * new notification of any already connected
470 * ports due to the virtual disconnect that it
471 * implies.
472 */
473 outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
474 mb();
475 udelay(5);
476 if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
477 dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
478
479 /* Just to be safe, disable interrupt requests and
480 * make sure the controller is stopped.
481 */
482 outw(0, base + UHCI_USBINTR);
483 outw(0, base + UHCI_USBCMD);
484 }
485 EXPORT_SYMBOL_GPL(uhci_reset_hc);
486
487 /*
488 * Initialize a controller that was newly discovered or has just been
489 * resumed. In either case we can't be sure of its previous state.
490 *
491 * Returns: 1 if the controller was reset, 0 otherwise.
492 */
493 int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
494 {
495 u16 legsup;
496 unsigned int cmd, intr;
497
498 /*
499 * When restarting a suspended controller, we expect all the
500 * settings to be the same as we left them:
501 *
502 * PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
503 * Controller is stopped and configured with EGSM set;
504 * No interrupts enabled except possibly Resume Detect.
505 *
506 * If any of these conditions are violated we do a complete reset.
507 */
508 pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
509 if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
510 dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
511 __func__, legsup);
512 goto reset_needed;
513 }
514
515 cmd = inw(base + UHCI_USBCMD);
516 if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
517 !(cmd & UHCI_USBCMD_EGSM)) {
518 dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
519 __func__, cmd);
520 goto reset_needed;
521 }
522
523 intr = inw(base + UHCI_USBINTR);
524 if (intr & (~UHCI_USBINTR_RESUME)) {
525 dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
526 __func__, intr);
527 goto reset_needed;
528 }
529 return 0;
530
531 reset_needed:
532 dev_dbg(&pdev->dev, "Performing full reset\n");
533 uhci_reset_hc(pdev, base);
534 return 1;
535 }
536 EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
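/*
 * Illustrative usage sketch (not part of the original file): a UHCI driver
 * can call uhci_check_and_reset_hc() on resume and only rebuild its schedule
 * when a full reset was actually performed.  The function and its io_base
 * handling are hypothetical.
 */
#if 0
static void example_uhci_pci_resume(struct pci_dev *pdev, unsigned long io_base)
{
	if (uhci_check_and_reset_hc(pdev, io_base)) {
		/* controller state was lost: reinitialize the frame list,
		 * root hub state, etc. before restarting the schedule */
	}
}
#endif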
537
538 static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
539 {
540 u16 cmd;
541 return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
542 }
543
544 #define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
545 #define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
546
547 static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
548 {
549 unsigned long base = 0;
550 int i;
551
552 if (!pio_enabled(pdev))
553 return;
554
555 for (i = 0; i < PCI_ROM_RESOURCE; i++)
556 if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
557 base = pci_resource_start(pdev, i);
558 break;
559 }
560
561 if (base)
562 uhci_check_and_reset_hc(pdev, base);
563 }
564
565 static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
566 {
567 return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
568 }
569
570 static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
571 {
572 void __iomem *base;
573 u32 control;
574 u32 fminterval;
575 int cnt;
576
577 if (!mmio_resource_enabled(pdev, 0))
578 return;
579
580 base = pci_ioremap_bar(pdev, 0);
581 if (base == NULL)
582 return;
583
584 control = readl(base + OHCI_CONTROL);
585
586 /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
587 #ifdef __hppa__
588 #define OHCI_CTRL_MASK (OHCI_CTRL_RWC | OHCI_CTRL_IR)
589 #else
590 #define OHCI_CTRL_MASK OHCI_CTRL_RWC
591
592 if (control & OHCI_CTRL_IR) {
593 int wait_time = 500; /* arbitrary; 5 seconds */
594 writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
595 writel(OHCI_OCR, base + OHCI_CMDSTATUS);
596 while (wait_time > 0 &&
597 readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
598 wait_time -= 10;
599 msleep(10);
600 }
601 if (wait_time <= 0)
602 dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
603 " (BIOS bug?) %08x\n",
604 readl(base + OHCI_CONTROL));
605 }
606 #endif
607
608 /* disable interrupts */
609 writel((u32) ~0, base + OHCI_INTRDISABLE);
610
611 /* Reset the USB bus, if the controller isn't already in RESET */
612 if (control & OHCI_HCFS) {
613 /* Go into RESET, preserving RWC (and possibly IR) */
614 writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
615 readl(base + OHCI_CONTROL);
616
617 /* drive bus reset for at least 50 ms (7.1.7.5) */
618 msleep(50);
619 }
620
621 /* software reset of the controller, preserving HcFmInterval */
622 fminterval = readl(base + OHCI_FMINTERVAL);
623 writel(OHCI_HCR, base + OHCI_CMDSTATUS);
624
625 /* reset requires max 10 us delay */
626 for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
627 if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
628 break;
629 udelay(1);
630 }
631 writel(fminterval, base + OHCI_FMINTERVAL);
632
633 /* Now the controller is safely in SUSPEND and nothing can wake it up */
634 iounmap(base);
635 }
636
637 static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
638 {
639 /* Pegatron Lucid (ExoPC) */
640 .matches = {
641 DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
642 DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
643 },
644 },
645 {
646 /* Pegatron Lucid (Ordissimo AIRIS) */
647 .matches = {
648 DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
649 DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
650 },
651 },
652 {
653 /* Pegatron Lucid (Ordissimo) */
654 .matches = {
655 DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
656 DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
657 },
658 },
659 {
660 /* HASEE E200 */
661 .matches = {
662 DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
663 DMI_MATCH(DMI_BOARD_NAME, "E210"),
664 DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
665 },
666 },
667 { }
668 };
669
670 static void ehci_bios_handoff(struct pci_dev *pdev,
671 void __iomem *op_reg_base,
672 u32 cap, u8 offset)
673 {
674 int try_handoff = 1, tried_handoff = 0;
675
676 /*
677 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
678 * the handoff on its unused controller. Skip it.
679 *
680 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
681 */
682 if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
683 pdev->device == 0x27cc)) {
684 if (dmi_check_system(ehci_dmi_nohandoff_table))
685 try_handoff = 0;
686 }
687
688 if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
689 dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
690
691 #if 0
692 /* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
693 * but that seems dubious in general (the BIOS left it off intentionally)
694 * and is known to prevent some systems from booting. So we won't do this
695 * unless maybe we can determine when we're on a system that needs SMI forced.
696 */
697 /* BIOS workaround (?): be sure the pre-Linux code
698 * receives the SMI
699 */
700 pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
701 pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
702 val | EHCI_USBLEGCTLSTS_SOOE);
703 #endif
704
705 /* some systems get upset if this semaphore is
706 * set for any other reason than forcing a BIOS
707 * handoff..
708 */
709 pci_write_config_byte(pdev, offset + 3, 1);
710 }
711
712 /* if boot firmware now owns EHCI, spin till it hands it over. */
713 if (try_handoff) {
714 int msec = 1000;
715 while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
716 tried_handoff = 1;
717 msleep(10);
718 msec -= 10;
719 pci_read_config_dword(pdev, offset, &cap);
720 }
721 }
722
723 if (cap & EHCI_USBLEGSUP_BIOS) {
724 /* well, possibly buggy BIOS... try to shut it down,
725 * and hope nothing goes too wrong
726 */
727 if (try_handoff)
728 dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
729 " (BIOS bug?) %08x\n", cap);
730 pci_write_config_byte(pdev, offset + 2, 0);
731 }
732
733 /* just in case, always disable EHCI SMIs */
734 pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
735
736 /* If the BIOS ever owned the controller then we can't expect
737 * any power sessions to remain intact.
738 */
739 if (tried_handoff)
740 writel(0, op_reg_base + EHCI_CONFIGFLAG);
741 }
742
743 static void quirk_usb_disable_ehci(struct pci_dev *pdev)
744 {
745 void __iomem *base, *op_reg_base;
746 u32 hcc_params, cap, val;
747 u8 offset, cap_length;
748 int wait_time, count = 256/4;
749
750 if (!mmio_resource_enabled(pdev, 0))
751 return;
752
753 base = pci_ioremap_bar(pdev, 0);
754 if (base == NULL)
755 return;
756
757 cap_length = readb(base);
758 op_reg_base = base + cap_length;
759
760 /* EHCI 0.96 and later may have "extended capabilities";
761 * spec section 5.1 explains the BIOS handoff, e.g. for
762 * booting from a USB disk or using a USB keyboard.
763 */
764 hcc_params = readl(base + EHCI_HCC_PARAMS);
765 offset = (hcc_params >> 8) & 0xff;
766 while (offset && --count) {
767 pci_read_config_dword(pdev, offset, &cap);
768
769 switch (cap & 0xff) {
770 case 1:
771 ehci_bios_handoff(pdev, op_reg_base, cap, offset);
772 break;
773 case 0: /* Illegal reserved cap, set cap=0 so we exit */
774 cap = 0; /* then fallthrough... */
775 default:
776 dev_warn(&pdev->dev, "EHCI: unrecognized capability "
777 "%02x\n", cap & 0xff);
778 }
779 offset = (cap >> 8) & 0xff;
780 }
781 if (!count)
782 dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
783
784 /*
785 * halt EHCI & disable its interrupts in any case
786 */
787 val = readl(op_reg_base + EHCI_USBSTS);
788 if ((val & EHCI_USBSTS_HALTED) == 0) {
789 val = readl(op_reg_base + EHCI_USBCMD);
790 val &= ~EHCI_USBCMD_RUN;
791 writel(val, op_reg_base + EHCI_USBCMD);
792
793 wait_time = 2000;
794 do {
795 writel(0x3f, op_reg_base + EHCI_USBSTS);
796 udelay(100);
797 wait_time -= 100;
798 val = readl(op_reg_base + EHCI_USBSTS);
799 if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
800 break;
801 }
802 } while (wait_time > 0);
803 }
804 writel(0, op_reg_base + EHCI_USBINTR);
805 writel(0x3f, op_reg_base + EHCI_USBSTS);
806
807 iounmap(base);
808 }
809
810 /*
811 * handshake - spin reading a register until handshake completes
812 * @ptr: address of hc register to be read
813 * @mask: bits to look at in result of read
814 * @done: value of those bits when handshake succeeds
815 * @wait_usec: timeout in microseconds
816 * @delay_usec: delay in microseconds to wait between polling
817 *
818 * Polls a register every delay_usec microseconds.
819 * Returns 0 when the mask bits have the value done.
820 * Returns -ETIMEDOUT if this condition is not true after
821 * wait_usec microseconds have passed.
822 */
823 static int handshake(void __iomem *ptr, u32 mask, u32 done,
824 int wait_usec, int delay_usec)
825 {
826 u32 result;
827
828 do {
829 result = readl(ptr);
830 result &= mask;
831 if (result == done)
832 return 0;
833 udelay(delay_usec);
834 wait_usec -= delay_usec;
835 } while (wait_usec > 0);
836 return -ETIMEDOUT;
837 }
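/*
 * Illustrative usage sketch (not part of the original file): handshake() is
 * a generic "poll a register until the masked bits take a given value"
 * helper.  The register and bit below are hypothetical.
 */
#if 0
static int example_wait_not_busy(struct pci_dev *pdev, void __iomem *status_reg)
{
	/* wait up to 1 ms, polling every 10 us, for bit 0 to clear */
	if (handshake(status_reg, 0x1, 0, 1000, 10)) {
		dev_warn(&pdev->dev, "controller did not go idle\n");
		return -ETIMEDOUT;
	}
	return 0;
}
#endif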
838
839 /*
840 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
841 * share some number of ports. These ports can be switched between either
842 * controller. Not all of the ports under the EHCI host controller may be
843 * switchable.
844 *
845 * The ports should be switched over to xHCI before PCI probes for any device
846 * start. This avoids active devices under EHCI being disconnected during the
847 * port switchover, which could cause loss of data on USB storage devices, or
848 * failed boot when the root file system is on a USB mass storage device and is
849 * enumerated under EHCI first.
850 *
851 * We write into the xHC's PCI configuration space in some Intel-specific
852 * registers to switch the ports over. The USB 3.0 terminations and the USB
853 * 2.0 data wires are switched separately. We want to enable the SuperSpeed
854 * terminations before switching the USB 2.0 wires over, so that USB 3.0
855 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
856 */
857 void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
858 {
859 u32 ports_available;
860 bool ehci_found = false;
861 struct pci_dev *companion = NULL;
862
863 /* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
864 * switching ports from EHCI to xHCI
865 */
866 if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
867 xhci_pdev->subsystem_device == 0x90a8)
868 return;
869
870 /* make sure an intel EHCI controller exists */
871 for_each_pci_dev(companion) {
872 if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
873 companion->vendor == PCI_VENDOR_ID_INTEL) {
874 ehci_found = true;
875 break;
876 }
877 }
878
879 if (!ehci_found)
880 return;
881
882 /* Don't switch the ports over if the user hasn't compiled the xHCI
883 * driver. Otherwise they will see "dead" USB ports that don't power
884 * the devices.
885 */
886 if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
887 dev_warn(&xhci_pdev->dev,
888 "CONFIG_USB_XHCI_HCD is turned off, "
889 "defaulting to EHCI.\n");
890 dev_warn(&xhci_pdev->dev,
891 "USB 3.0 devices will work at USB 2.0 speeds.\n");
892 usb_disable_xhci_ports(xhci_pdev);
893 return;
894 }
895
896 /* Read USB3PRM, the USB 3.0 Port Routing Mask Register, which
897 * indicates the ports the OS is allowed to switch over.
898 */
899 pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
900 &ports_available);
901
902 dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
903 ports_available);
904
905 /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
906 * Register, to turn on SuperSpeed terminations for the
907 * switchable ports.
908 */
909 pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
910 ports_available);
911
912 pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
913 &ports_available);
914 dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
915 "under xHCI: 0x%x\n", ports_available);
916
917 /* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask Register, which
918 * indicates the USB 2.0 ports that can be controlled by the xHCI host.
919 */
920
921 pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
922 &ports_available);
923
924 dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
925 ports_available);
926
927 /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
928 * switch the USB 2.0 power and data lines over to the xHCI
929 * host.
930 */
931 pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
932 ports_available);
933
934 pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
935 &ports_available);
936 dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
937 "to xHCI: 0x%x\n", ports_available);
938 }
939 EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
940
941 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
942 {
943 pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
944 pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
945 }
946 EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
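/*
 * Illustrative usage sketch (not part of the original file): besides the
 * early handoff below, an xHCI PCI driver may need to redo the port
 * switchover after a suspend/resume cycle, since firmware can hand the
 * shared ports back to the EHCI controller.  The resume helper below is
 * hypothetical.
 */
#if 0
static void example_xhci_pci_resume(struct pci_dev *pdev)
{
	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
		usb_enable_intel_xhci_ports(pdev);
}
#endif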
947
948 /**
949 * PCI Quirks for xHCI.
950 *
951 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
952 * It signals to the BIOS that the OS wants control of the host controller,
953 * and then waits 5 seconds for the BIOS to hand over control.
954 * If we time out, assume the BIOS is broken and take control anyway.
955 */
956 static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
957 {
958 void __iomem *base;
959 int ext_cap_offset;
960 void __iomem *op_reg_base;
961 u32 val;
962 int timeout;
963 int len = pci_resource_len(pdev, 0);
964
965 if (!mmio_resource_enabled(pdev, 0))
966 return;
967
968 base = ioremap_nocache(pci_resource_start(pdev, 0), len);
969 if (base == NULL)
970 return;
971
972 /*
973 * Find the Legacy Support Capability register -
974 * this is optional for xHCI host controllers.
975 */
976 ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
977 do {
978 if ((ext_cap_offset + sizeof(val)) > len) {
979 /* We're reading garbage from the controller */
980 dev_warn(&pdev->dev,
981 "xHCI controller failing to respond");
982 return;
983 }
984
985 if (!ext_cap_offset)
986 /* We've reached the end of the extended capabilities */
987 goto hc_init;
988
989 val = readl(base + ext_cap_offset);
990 if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
991 break;
992 ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
993 } while (1);
994
995 /* If the BIOS owns the HC, signal that the OS wants it, and wait */
996 if (val & XHCI_HC_BIOS_OWNED) {
997 writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
998
999 /* Wait for 5 seconds with 10 microsecond polling interval */
1000 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
1001 0, 5000, 10);
1002
1003 /* Assume a buggy BIOS and take HC ownership anyway */
1004 if (timeout) {
1005 dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
1006 " (BIOS bug ?) %08x\n", val);
1007 writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
1008 }
1009 }
1010
1011 val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1012 /* Mask off (turn off) any enabled SMIs */
1013 val &= XHCI_LEGACY_DISABLE_SMI;
1014 /* Mask all SMI events bits, RW1C */
1015 val |= XHCI_LEGACY_SMI_EVENTS;
1016 /* Disable any BIOS SMIs and clear all SMI events */
1017 writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1018
1019 hc_init:
1020 if (pdev->vendor == PCI_VENDOR_ID_INTEL)
1021 usb_enable_intel_xhci_ports(pdev);
1022
1023 op_reg_base = base + XHCI_HC_LENGTH(readl(base));
1024
1025 /* Wait for the host controller to be ready before writing any
1026 * operational or runtime registers. Wait 5 seconds and no more.
1027 */
1028 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
1029 5000, 10);
1030 /* Assume a buggy HC and start HC initialization anyway */
1031 if (timeout) {
1032 val = readl(op_reg_base + XHCI_STS_OFFSET);
1033 dev_warn(&pdev->dev,
1034 "xHCI HW not ready after 5 sec (HC bug?) "
1035 "status = 0x%x\n", val);
1036 }
1037
1038 /* Send the halt and disable interrupts command */
1039 val = readl(op_reg_base + XHCI_CMD_OFFSET);
1040 val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
1041 writel(val, op_reg_base + XHCI_CMD_OFFSET);
1042
1043 /* Wait for the HC to halt - poll every 125 usec (one microframe). */
1044 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
1045 XHCI_MAX_HALT_USEC, 125);
1046 if (timeout) {
1047 val = readl(op_reg_base + XHCI_STS_OFFSET);
1048 dev_warn(&pdev->dev,
1049 "xHCI HW did not halt within %d usec "
1050 "status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
1051 }
1052
1053 iounmap(base);
1054 }
1055
1056 static void quirk_usb_early_handoff(struct pci_dev *pdev)
1057 {
1058 /* Skip the Netlogic MIPS SoC's internal PCI USB controller.
1059 * This device does not need/support EHCI/OHCI handoff.
1060 */
1061 if (pdev->vendor == 0x184e) /* vendor Netlogic */
1062 return;
1063 if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
1064 pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
1065 pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
1066 pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
1067 return;
1068
1069 if (pci_enable_device(pdev) < 0) {
1070 dev_warn(&pdev->dev, "Can't enable PCI device, "
1071 "BIOS handoff failed.\n");
1072 return;
1073 }
1074 if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
1075 quirk_usb_handoff_uhci(pdev);
1076 else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
1077 quirk_usb_handoff_ohci(pdev);
1078 else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
1079 quirk_usb_disable_ehci(pdev);
1080 else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
1081 quirk_usb_handoff_xhci(pdev);
1082 pci_disable_device(pdev);
1083 }
1084 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1085 PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
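/*
 * The declaration above registers quirk_usb_early_handoff() as a "final"
 * stage PCI fixup for every device in the USB serial-bus class; the
 * class_shift argument of 8 makes the match ignore the programming
 * interface byte, so UHCI, OHCI, EHCI and xHCI controllers are all covered
 * before any host controller driver binds.  A hypothetical, more narrowly
 * scoped registration (for illustration only) would look like:
 */
#if 0
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			PCI_CLASS_SERIAL_USB_XHCI, 0, quirk_usb_early_handoff);
#endif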