/* arch/x86/pci/common.c */
1 /*
2 * Low-Level PCI Support for PC
3 *
4 * (c) 1999--2000 Martin Mares <mj@ucw.cz>
5 */
6
7 #include <linux/sched.h>
8 #include <linux/pci.h>
9 #include <linux/ioport.h>
10 #include <linux/init.h>
11 #include <linux/dmi.h>
12
13 #include <asm/acpi.h>
14 #include <asm/segment.h>
15 #include <asm/io.h>
16 #include <asm/smp.h>
17 #include <asm/pci_x86.h>
18
/* Config-access mechanisms to try; trimmed/extended by "pci=" options. */
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
				PCI_PROBE_MMCONF;

unsigned int pci_early_dump_regs;	/* set by "pci=earlydump" */
static int pci_bf_sort;			/* breadth-first sort mode, see pcibios_setup() */
int pci_routeirq;			/* set by "pci=routeirq" */
int noioapicquirk;			/* set by "pci=noioapicquirk" */
#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
int noioapicreroute = 0;
#else
int noioapicreroute = 1;
#endif
int pcibios_last_bus = -1;		/* set by "pci=lastbus=" */
unsigned long pirq_table_addr;		/* set by "pci=pirqaddr=" */
struct pci_bus *pci_root_bus;
struct pci_raw_ops *raw_pci_ops;	/* legacy config space (reg < 256) backend */
struct pci_raw_ops *raw_pci_ext_ops;	/* extended config space backend */
36
37 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
38 int reg, int len, u32 *val)
39 {
40 if (domain == 0 && reg < 256 && raw_pci_ops)
41 return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
42 if (raw_pci_ext_ops)
43 return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
44 return -EINVAL;
45 }
46
47 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
48 int reg, int len, u32 val)
49 {
50 if (domain == 0 && reg < 256 && raw_pci_ops)
51 return raw_pci_ops->write(domain, bus, devfn, reg, len, val);
52 if (raw_pci_ext_ops)
53 return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val);
54 return -EINVAL;
55 }
56
57 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
58 {
59 return raw_pci_read(pci_domain_nr(bus), bus->number,
60 devfn, where, size, value);
61 }
62
63 static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
64 {
65 return raw_pci_write(pci_domain_nr(bus), bus->number,
66 devfn, where, size, value);
67 }
68
/* Default config accessors installed on root busses scanned here. */
struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
73
/*
 * legacy, numa, and acpi all want to call pcibios_scan_root
 * from their initcalls. This flag prevents that.
 */
int pcibios_scanned;

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
DEFINE_SPINLOCK(pci_config_lock);
85
86 static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
87 {
88 pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
89 printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
90 return 0;
91 }
92
/* DMI matches routed to can_skip_ioresource_align(). */
static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitconst = {
/*
 * Systems where PCI IO resource ISA alignment can be skipped
 * when the ISA enable bit in the bridge control is not set
 */
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3850",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3850"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3950"),
		},
	},
	{}	/* terminator */
};
124
/* Apply the ISA-alignment-skip DMI quirks above at boot. */
void __init dmi_check_skip_isa_align(void)
{
	dmi_check_system(can_skip_pciprobe_dmi_table);
}
129
130 static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
131 {
132 struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];
133
134 if (pci_probe & PCI_NOASSIGN_ROMS) {
135 if (rom_r->parent)
136 return;
137 if (rom_r->start) {
138 /* we deal with BIOS assigned ROM later */
139 return;
140 }
141 rom_r->start = rom_r->end = rom_r->flags = 0;
142 }
143 }
144
145 /*
146 * Called after each bus is probed, but before its children
147 * are examined.
148 */
149
void __devinit pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	/* root bus? apply arch root-resource quirks before reading bridges */
	if (!b->parent)
		x86_pci_root_bus_res_quirks(b);
	pci_read_bridge_bases(b);
	/* then fix up each device's resources on this bus */
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
}
161
162 /*
163 * Only use DMI information to set this if nothing was passed
164 * on the kernel command line (which was parsed earlier).
165 */
166
167 static int __devinit set_bf_sort(const struct dmi_system_id *d)
168 {
169 if (pci_bf_sort == pci_bf_sort_default) {
170 pci_bf_sort = pci_dmi_bf;
171 printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n", d->ident);
172 }
173 return 0;
174 }
175
/*
 * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
 */
#ifdef __i386__
/* DMI callback: equivalent to booting with pci=assign-busses. */
static int __devinit assign_all_busses(const struct dmi_system_id *d)
{
	pci_probe |= PCI_ASSIGN_ALL_BUSSES;
	printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
			" (pci=assign-busses)\n", d->ident);
	return 0;
}
#endif
188
/* DMI quirk table routing systems to set_bf_sort()/assign_all_busses(). */
static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
#ifdef __i386__
/*
 * Laptops which need pci=assign-busses to see Cardbus cards
 */
	{
		.callback = assign_all_busses,
		.ident = "Samsung X20 Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
			DMI_MATCH(DMI_PRODUCT_NAME, "SX20S"),
		},
	},
#endif		/* __i386__ */
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 1950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 1955",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 2900",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2900"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 2950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge R900",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL20p G3",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G3"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL20p G4",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G4"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL30p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL30p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL25p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL25p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL35p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL35p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL45p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL45p G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G2"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL460c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL460c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL465c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL465c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL480c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL480c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL685c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL360",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL380",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
		},
	},
#ifdef __i386__
	{
		.callback = assign_all_busses,
		.ident = "Compaq EVO N800c",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
			DMI_MATCH(DMI_PRODUCT_NAME, "EVO N800c"),
		},
	},
#endif
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL385 G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL585 G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
		},
	},
	{}	/* terminator */
};
375
/* Apply the pci-probe DMI quirks (bfsort / assign-busses) at boot. */
void __init dmi_check_pciprobe(void)
{
	dmi_check_system(pciprobe_dmi_table);
}
380
381 struct pci_bus * __devinit pcibios_scan_root(int busnum)
382 {
383 struct pci_bus *bus = NULL;
384 struct pci_sysdata *sd;
385
386 while ((bus = pci_find_next_bus(bus)) != NULL) {
387 if (bus->number == busnum) {
388 /* Already scanned */
389 return bus;
390 }
391 }
392
393 /* Allocate per-root-bus (not per bus) arch-specific data.
394 * TODO: leak; this memory is never freed.
395 * It's arguable whether it's worth the trouble to care.
396 */
397 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
398 if (!sd) {
399 printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
400 return NULL;
401 }
402
403 sd->node = get_mp_bus_to_node(busnum);
404
405 printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
406 bus = pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
407 if (!bus)
408 kfree(sd);
409
410 return bus;
411 }
412
/* Defined in the generic PCI layer; tuned per-CPU in pcibios_init(). */
extern u8 pci_cache_line_size;
414
415 int __init pcibios_init(void)
416 {
417 struct cpuinfo_x86 *c = &boot_cpu_data;
418
419 if (!raw_pci_ops) {
420 printk(KERN_WARNING "PCI: System does not support PCI\n");
421 return 0;
422 }
423
424 /*
425 * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8
426 * and P4. It's also good for 386/486s (which actually have 16)
427 * as quite a few PCI devices do not support smaller values.
428 */
429 pci_cache_line_size = 32 >> 2;
430 if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
431 pci_cache_line_size = 64 >> 2; /* K7 & K8 */
432 else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
433 pci_cache_line_size = 128 >> 2; /* P4 */
434
435 pcibios_resource_survey();
436
437 if (pci_bf_sort >= pci_force_bf)
438 pci_sort_breadthfirst();
439 return 0;
440 }
441
/*
 * Parse one "pci=" boot option.  Returns NULL when the option was
 * recognized and consumed, or the unmodified string otherwise so the
 * caller can report/forward it.
 */
char * __devinit pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		/* disable all PCI probing */
		pci_probe = 0;
		return NULL;
	} else if (!strcmp(str, "bfsort")) {
		pci_bf_sort = pci_force_bf;
		return NULL;
	} else if (!strcmp(str, "nobfsort")) {
		pci_bf_sort = pci_force_nobf;
		return NULL;
	}
#ifdef CONFIG_PCI_BIOS
	else if (!strcmp(str, "bios")) {
		pci_probe = PCI_PROBE_BIOS;
		return NULL;
	} else if (!strcmp(str, "nobios")) {
		pci_probe &= ~PCI_PROBE_BIOS;
		return NULL;
	} else if (!strcmp(str, "biosirq")) {
		pci_probe |= PCI_BIOS_IRQ_SCAN;
		return NULL;
	} else if (!strncmp(str, "pirqaddr=", 9)) {
		/* physical address of the $PIR table */
		pirq_table_addr = simple_strtoul(str+9, NULL, 0);
		return NULL;
	}
#endif
#ifdef CONFIG_PCI_DIRECT
	else if (!strcmp(str, "conf1")) {
		pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
		return NULL;
	}
	else if (!strcmp(str, "conf2")) {
		pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
		return NULL;
	}
#endif
#ifdef CONFIG_PCI_MMCONFIG
	else if (!strcmp(str, "nommconf")) {
		pci_probe &= ~PCI_PROBE_MMCONF;
		return NULL;
	}
	else if (!strcmp(str, "check_enable_amd_mmconf")) {
		pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
		return NULL;
	}
#endif
	else if (!strcmp(str, "noacpi")) {
		acpi_noirq_set();
		return NULL;
	}
	else if (!strcmp(str, "noearly")) {
		pci_probe |= PCI_PROBE_NOEARLY;
		return NULL;
	}
#ifndef CONFIG_X86_VISWS
	else if (!strcmp(str, "usepirqmask")) {
		pci_probe |= PCI_USE_PIRQ_MASK;
		return NULL;
	} else if (!strncmp(str, "irqmask=", 8)) {
		pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
		return NULL;
	} else if (!strncmp(str, "lastbus=", 8)) {
		pcibios_last_bus = simple_strtol(str+8, NULL, 0);
		return NULL;
	}
#endif
	else if (!strcmp(str, "rom")) {
		pci_probe |= PCI_ASSIGN_ROMS;
		return NULL;
	} else if (!strcmp(str, "norom")) {
		pci_probe |= PCI_NOASSIGN_ROMS;
		return NULL;
	} else if (!strcmp(str, "assign-busses")) {
		pci_probe |= PCI_ASSIGN_ALL_BUSSES;
		return NULL;
	} else if (!strcmp(str, "use_crs")) {
		pci_probe |= PCI_USE__CRS;
		return NULL;
	} else if (!strcmp(str, "earlydump")) {
		pci_early_dump_regs = 1;
		return NULL;
	} else if (!strcmp(str, "routeirq")) {
		pci_routeirq = 1;
		return NULL;
	} else if (!strcmp(str, "skip_isa_align")) {
		pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
		return NULL;
	} else if (!strcmp(str, "noioapicquirk")) {
		noioapicquirk = 1;
		return NULL;
	} else if (!strcmp(str, "ioapicreroute")) {
		if (noioapicreroute != -1)
			noioapicreroute = 0;
		return NULL;
	} else if (!strcmp(str, "noioapicreroute")) {
		if (noioapicreroute != -1)
			noioapicreroute = 1;
		return NULL;
	}
	/* not an option we understand */
	return str;
}
544
545 unsigned int pcibios_assign_all_busses(void)
546 {
547 return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
548 }
549
/*
 * Enable @dev's resources in @mask and, unless MSI is active, route its
 * legacy IRQ.  Returns 0 on success or a negative error from resource
 * enabling / IRQ routing.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int err = pci_enable_resources(dev, mask);

	if (err < 0)
		return err;

	if (pci_dev_msi_enabled(dev))
		return 0;

	return pcibios_enable_irq(dev);
}
561
562 void pcibios_disable_device (struct pci_dev *dev)
563 {
564 if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
565 pcibios_disable_irq(dev);
566 }
567
568 int pci_ext_cfg_avail(struct pci_dev *dev)
569 {
570 if (raw_pci_ext_ops)
571 return 1;
572 else
573 return 0;
574 }
575
576 struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node)
577 {
578 struct pci_bus *bus = NULL;
579 struct pci_sysdata *sd;
580
581 /*
582 * Allocate per-root-bus (not per bus) arch-specific data.
583 * TODO: leak; this memory is never freed.
584 * It's arguable whether it's worth the trouble to care.
585 */
586 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
587 if (!sd) {
588 printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busno);
589 return NULL;
590 }
591 sd->node = node;
592 bus = pci_scan_bus(busno, ops, sd);
593 if (!bus)
594 kfree(sd);
595
596 return bus;
597 }
598
/* Convenience wrapper: scan @busno with the default ops and no NUMA node. */
struct pci_bus * __devinit pci_scan_bus_with_sysdata(int busno)
{
	return pci_scan_bus_on_node(busno, &pci_root_ops, -1);
}
603
604 /*
605 * NUMA info for PCI busses
606 *
607 * Early arch code is responsible for filling in reasonable values here.
608 * A node id of "-1" means "use current node". In other words, if a bus
609 * has a -1 node id, it's not tightly coupled to any particular chunk
610 * of memory (as is the case on some Nehalem systems).
611 */
#ifdef CONFIG_NUMA

/* Number of bus slots tracked in the bus -> node map. */
#define BUS_NR 256

#ifdef CONFIG_X86_64

/* Bus number -> NUMA node; -1 means "use current node" (see above). */
static int mp_bus_to_node[BUS_NR] = {
	[0 ... BUS_NR - 1] = -1
};
621
622 void set_mp_bus_to_node(int busnum, int node)
623 {
624 if (busnum >= 0 && busnum < BUS_NR)
625 mp_bus_to_node[busnum] = node;
626 }
627
628 int get_mp_bus_to_node(int busnum)
629 {
630 int node = -1;
631
632 if (busnum < 0 || busnum > (BUS_NR - 1))
633 return node;
634
635 node = mp_bus_to_node[busnum];
636
637 /*
638 * let numa_node_id to decide it later in dma_alloc_pages
639 * if there is no ram on that node
640 */
641 if (node != -1 && !node_online(node))
642 node = -1;
643
644 return node;
645 }
646
#else /* CONFIG_X86_32 */

/* Bus number -> NUMA node; -1 means "use current node" (see above). */
static int mp_bus_to_node[BUS_NR] = {
	[0 ... BUS_NR - 1] = -1
};
652
653 void set_mp_bus_to_node(int busnum, int node)
654 {
655 if (busnum >= 0 && busnum < BUS_NR)
656 mp_bus_to_node[busnum] = (unsigned char) node;
657 }
658
659 int get_mp_bus_to_node(int busnum)
660 {
661 int node;
662
663 if (busnum < 0 || busnum > (BUS_NR - 1))
664 return 0;
665 node = mp_bus_to_node[busnum];
666 return node;
667 }
668
669 #endif /* CONFIG_X86_32 */
670
671 #endif /* CONFIG_NUMA */