[POWERPC] cell: Use machine_*_initcall() hooks in platform code
[deliverable/linux.git] / arch / powerpc / kernel / pci_64.c
1 /*
2 * Port for PPC64 David Engebretsen, IBM Corp.
3 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
4 *
5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
6 * Rework, based on alpha PCI code.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 #undef DEBUG
15
16 #include <linux/kernel.h>
17 #include <linux/pci.h>
18 #include <linux/string.h>
19 #include <linux/init.h>
20 #include <linux/bootmem.h>
21 #include <linux/mm.h>
22 #include <linux/list.h>
23 #include <linux/syscalls.h>
24 #include <linux/irq.h>
25 #include <linux/vmalloc.h>
26
27 #include <asm/processor.h>
28 #include <asm/io.h>
29 #include <asm/prom.h>
30 #include <asm/pci-bridge.h>
31 #include <asm/byteorder.h>
32 #include <asm/machdep.h>
33 #include <asm/ppc-pci.h>
34
35 #ifdef DEBUG
36 #include <asm/udbg.h>
37 #define DBG(fmt...) printk(fmt)
38 #else
39 #define DBG(fmt...)
40 #endif
41
/* When non-zero (the default), use only the firmware/device-tree view of
 * the bus.  pcibios_init() propagates this into ppc_pci_flags as
 * PPC_PCI_PROBE_ONLY.
 */
unsigned long pci_probe_only = 1;

/* pci_io_base -- the base address from which io bars are offsets.
 * This is the lowest I/O base address (so bar values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard coded offsets. If no ISA bus exists nothing
 * is mapped on the first 64K of IO space
 */
unsigned long pci_io_base = ISA_IO_BASE;
EXPORT_SYMBOL(pci_io_base);

/* List of all PCI host bridges (PHBs) known to the system */
LIST_HEAD(hose_list);

/* DMA operations installed by platform code via set_pci_dma_ops() and
 * copied into each device's archdata by pcibios_setup_new_device().
 */
static struct dma_mapping_ops *pci_dma_ops;
56
/* Install the DMA operations that newly set up PCI devices will use */
void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

/* Return the currently installed PCI DMA operations (NULL if none set) */
struct dma_mapping_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);
67
68
/* Set the streaming DMA mask for a PCI device; thin wrapper around the
 * generic dma_set_mask() on the embedded struct device.
 */
int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	return dma_set_mask(&dev->dev, mask);
}
73
/* Set the coherent (consistent) DMA mask.  The streaming mask is set
 * first via dma_set_mask(), then coherent_dma_mask is unconditionally
 * copied from the resulting dev->dma_mask, keeping the two identical.
 */
int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	int rc;

	rc = dma_set_mask(&dev->dev, mask);
	dev->dev.coherent_dma_mask = dev->dma_mask;

	return rc;
}
83
84 static void fixup_broken_pcnet32(struct pci_dev* dev)
85 {
86 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
87 dev->vendor = PCI_VENDOR_ID_AMD;
88 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
89 }
90 }
91 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
92
93
94 static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
95 {
96 const u32 *prop;
97 int len;
98
99 prop = of_get_property(np, name, &len);
100 if (prop && len >= 4)
101 return *prop;
102 return def;
103 }
104
105 static unsigned int pci_parse_of_flags(u32 addr0)
106 {
107 unsigned int flags = 0;
108
109 if (addr0 & 0x02000000) {
110 flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
111 flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
112 flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
113 if (addr0 & 0x40000000)
114 flags |= IORESOURCE_PREFETCH
115 | PCI_BASE_ADDRESS_MEM_PREFETCH;
116 } else if (addr0 & 0x01000000)
117 flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
118 return flags;
119 }
120
121
/* Fill in a pci_dev's resource[] array from the device node's
 * "assigned-addresses" property rather than by probing the BARs.
 * Each property entry is 5 cells (20 bytes): phys.hi, then a 64-bit
 * address and a 64-bit size.
 */
static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
{
	u64 base, size;
	unsigned int flags;
	struct resource *res;
	const u32 *addrs;
	u32 i;
	int proplen;

	addrs = of_get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
	for (; proplen >= 20; proplen -= 20, addrs += 5) {
		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;
		base = of_read_number(&addrs[1], 2);
		size = of_read_number(&addrs[3], 2);
		if (!size)
			continue;
		/* Low byte of phys.hi is the config-space register (BAR)
		 * this entry describes */
		i = addrs[0] & 0xff;
		DBG(" base: %llx, size: %llx, i: %x\n",
		(unsigned long long)base, (unsigned long long)size, i);

		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			/* BARs are 4 bytes apart, hence the >> 2 */
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
		} else {
			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = base;
		res->end = base + size - 1;
		res->flags = flags;
		res->name = pci_name(dev);
	}
}
162
/* Create and register a struct pci_dev from an Open Firmware device
 * node without probing config space: vendor/device IDs, class, revision
 * and resources are all taken from device-tree properties.  Returns the
 * new device, or NULL on allocation failure.
 */
struct pci_dev *of_create_pci_dev(struct device_node *node,
				 struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	const char *type;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;
	type = of_get_property(node, "device_type", NULL);
	if (type == NULL)
		type = "";

	DBG(" create device, devfn: %x, type: %s\n", devfn, type);

	dev->bus = bus;
	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->multifunction = 0;		/* maybe a lie? */

	/* 0xffff is not a valid vendor/device ID; used as the "missing
	 * property" fallback */
	dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
	dev->device = get_int_prop(node, "device-id", 0xffff);
	dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
	dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);

	dev->cfg_size = pci_cfg_space_size(dev);

	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
	dev->class = get_int_prop(node, "class-code", 0);
	dev->revision = get_int_prop(node, "revision-id", 0);

	DBG(" class: 0x%x\n", dev->class);
	DBG(" revision: 0x%x\n", dev->revision);

	dev->current_state = 4;		/* unknown power state */
	dev->error_state = pci_channel_io_normal;
	dev->dma_mask = 0xffffffff;

	/* The device_type determines the header type and therefore which
	 * config register holds the expansion ROM base */
	if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (!strcmp(type, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;
		/* Maybe do a default OF mapping here */
		dev->irq = NO_IRQ;
	}

	/* Resources come from "assigned-addresses", not BAR probing */
	pci_parse_of_addrs(node, dev);

	DBG(" adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);
226
227 void __devinit of_scan_bus(struct device_node *node,
228 struct pci_bus *bus)
229 {
230 struct device_node *child = NULL;
231 const u32 *reg;
232 int reglen, devfn;
233 struct pci_dev *dev;
234
235 DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);
236
237 /* Scan direct children */
238 while ((child = of_get_next_child(node, child)) != NULL) {
239 DBG(" * %s\n", child->full_name);
240 reg = of_get_property(child, "reg", &reglen);
241 if (reg == NULL || reglen < 20)
242 continue;
243 devfn = (reg[0] >> 8) & 0xff;
244
245 /* create a new pci_dev for this device */
246 dev = of_create_pci_dev(child, bus, devfn);
247 if (!dev)
248 continue;
249 DBG(" dev header type: %x\n", dev->hdr_type);
250 }
251
252 /* Ally all fixups */
253 pcibios_fixup_of_probed_bus(bus);
254
255 /* Now scan child busses */
256 list_for_each_entry(dev, &bus->devices, bus_list) {
257 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
258 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
259 struct device_node *child = pci_device_to_OF_node(dev);
260 if (dev)
261 of_scan_pci_bridge(child, dev);
262 }
263 }
264 }
265 EXPORT_SYMBOL(of_scan_bus);
266
/* Set up and scan the bus behind a PCI-PCI (or cardbus) bridge from the
 * device tree: allocate the child bus, fill in its resources from the
 * bridge node's "ranges" property and recurse according to the
 * platform's probe mode.
 */
void __devinit of_scan_pci_bridge(struct device_node *node,
				struct pci_dev *dev)
{
	struct pci_bus *bus;
	const u32 *busrange, *ranges;
	int len, i, mode;
	struct resource *res;
	unsigned int flags;
	u64 size;

	DBG("of_scan_pci_bridge(%s)\n", node->full_name);

	/* parse bus-range property */
	busrange = of_get_property(node, "bus-range", &len);
	if (busrange == NULL || len != 8) {
		printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}
	ranges = of_get_property(node, "ranges", &len);
	if (ranges == NULL) {
		printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}

	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
	if (!bus) {
		printk(KERN_ERR "Failed to create pci bus for %s\n",
		       node->full_name);
		return;
	}

	bus->primary = dev->bus->number;
	bus->subordinate = busrange[1];
	bus->bridge_ctl = 0;
	bus->sysdata = node;

	/* parse ranges property */
	/* PCI #address-cells == 3 and #size-cells == 2 always */
	res = &dev->resource[PCI_BRIDGE_RESOURCES];
	/* Clear the bridge windows and point the bus resources at them */
	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
		res->flags = 0;
		bus->resource[i] = res;
		++res;
	}
	/* Slot 0 is reserved for the single I/O window; memory windows
	 * fill slots from 1 upward */
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		/* Each entry is 8 cells (32 bytes): 3 child address,
		 * 3 parent address, 2 size */
		flags = pci_parse_of_flags(ranges[0]);
		size = of_read_number(&ranges[6], 2);
		if (flags == 0 || size == 0)
			continue;
		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
		}
		res->start = of_read_number(&ranges[1], 2);
		res->end = res->start + size - 1;
		res->flags = flags;
	}
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	DBG(" bus name: %s\n", bus->name);

	/* Let the platform choose how to scan below this bridge */
	mode = PCI_PROBE_NORMAL;
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	DBG(" probe mode: %d\n", mode);

	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);
	else if (mode == PCI_PROBE_NORMAL)
		pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);
354
355 void __devinit scan_phb(struct pci_controller *hose)
356 {
357 struct pci_bus *bus;
358 struct device_node *node = hose->dn;
359 int i, mode;
360 struct resource *res;
361
362 DBG("PCI: Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
363
364 /* Create an empty bus for the toplevel */
365 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
366 if (bus == NULL) {
367 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
368 hose->global_number);
369 return;
370 }
371 bus->secondary = hose->first_busno;
372 hose->bus = bus;
373
374 /* Get some IO space for the new PHB */
375 pcibios_map_io_space(bus);
376
377 /* Wire up PHB bus resources */
378 if (hose->io_resource.flags) {
379 DBG("PCI: PHB IO resource = %016lx-%016lx [%lx]\n",
380 hose->io_resource.start, hose->io_resource.end,
381 hose->io_resource.flags);
382 bus->resource[0] = res = &hose->io_resource;
383 }
384 for (i = 0; i < 3; ++i) {
385 DBG("PCI: PHB MEM resource %d = %016lx-%016lx [%lx]\n", i,
386 hose->mem_resources[i].start,
387 hose->mem_resources[i].end,
388 hose->mem_resources[i].flags);
389 bus->resource[i+1] = &hose->mem_resources[i];
390 }
391 DBG("PCI: PHB MEM offset = %016lx\n", hose->pci_mem_offset);
392 DBG("PCI: PHB IO offset = %08lx\n",
393 (unsigned long)hose->io_base_virt - _IO_BASE);
394
395 /* Get probe mode and perform scan */
396 mode = PCI_PROBE_NORMAL;
397 if (node && ppc_md.pci_probe_mode)
398 mode = ppc_md.pci_probe_mode(bus);
399 DBG(" probe mode: %d\n", mode);
400 if (mode == PCI_PROBE_DEVTREE) {
401 bus->subordinate = hose->last_busno;
402 of_scan_bus(node, bus);
403 }
404
405 if (mode == PCI_PROBE_NORMAL)
406 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
407 }
408
/* Boot-time PCI initialisation (subsys_initcall): scan every recorded
 * host bridge, add the discovered devices and let the common code
 * survey and allocate resources.  Always returns 0.
 */
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;

	printk(KERN_INFO "PCI: Probing PCI hardware\n");

	/* For now, override phys_mem_access_prot. If we need it,
	 * later, we may move that initialization to each ppc_md
	 */
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

	/* Propagate pci_probe_only into the generic flag word */
	if (pci_probe_only)
		ppc_pci_flags |= PPC_PCI_PROBE_ONLY;

	/* Scan all of the recorded PCI controllers. */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		scan_phb(hose);
		pci_bus_add_devices(hose->bus);
	}

	/* Call common code to handle resource allocation */
	pcibios_resource_survey();

	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall(pcibios_init);
438
#ifdef CONFIG_HOTPLUG

/* Undo pcibios_map_io_space() when a bus is removed.  For a PHB the
 * whole virtual I/O area is unmapped; for an ordinary P2P bridge only
 * the hash table entries covering its I/O window are flushed.
 * Returns 0.
 */
int pcibios_unmap_io_space(struct pci_bus *bus)
{
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this is not a PHB, we only flush the hash table over
	 * the area mapped by this bridge. We don't play with the PTE
	 * mappings since we might have to deal with sub-page alignments
	 * so flushing the hash table is the only sane way to make sure
	 * that no hash entries are covering that removed bridge area
	 * while still allowing other busses overlapping those pages
	 */
	if (bus->self) {
		struct resource *res = bus->resource[0];

		DBG("IO unmapping for PCI-PCI bridge %s\n",
		    pci_name(bus->self));

		/* __flush_hash_table_range() takes a start and an *end*
		 * address; the end is the window end translated into the
		 * kernel I/O space, not the window length */
		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
					 res->end + _IO_BASE + 1);
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);

	/* Check if we have IOs allocated */
	if (hose->io_base_alloc == 0)
		return 0;

	DBG("IO unmapping for PHB %s\n", hose->dn->full_name);
	DBG(" alloc=0x%p\n", hose->io_base_alloc);

	/* This is a PHB, we fully unmap the IO area */
	vunmap(hose->io_base_alloc);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);

#endif /* CONFIG_HOTPLUG */
483
/* Allocate and map the kernel virtual I/O space for a host bridge's
 * root bus (called from scan_phb()).  For a bus below a P2P bridge
 * there is nothing to do since the PHB mapping already covers it.
 * Returns 0 on success or -ENOMEM.
 */
int __devinit pcibios_map_io_space(struct pci_bus *bus)
{
	struct vm_struct *area;
	unsigned long phys_page;
	unsigned long size_page;
	unsigned long io_virt_offset;
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this not a PHB, nothing to do, page tables still exist and
	 * thus HPTEs will be faulted in when needed
	 */
	if (bus->self) {
		DBG("IO mapping for PCI-PCI bridge %s\n",
		    pci_name(bus->self));
		DBG(" virt=0x%016lx...0x%016lx\n",
		    bus->resource[0]->start + _IO_BASE,
		    bus->resource[0]->end + _IO_BASE);
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);
	/* Round the physical window out to page boundaries */
	phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
	size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);

	/* Make sure IO area address is clear */
	hose->io_base_alloc = NULL;

	/* If there's no IO to map on that bus, get away too */
	if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
		return 0;

	/* Let's allocate some IO space for that guy. We don't pass
	 * VM_IOREMAP because we don't care about alignment tricks that
	 * the core does in that case. Maybe we should due to stupid card
	 * with incomplete address decoding but I'd rather not deal with
	 * those outside of the reserved 64K legacy region.
	 */
	area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
	if (area == NULL)
		return -ENOMEM;
	hose->io_base_alloc = area->addr;
	/* io_base_virt preserves the sub-page offset of the physical base
	 * within the page-aligned allocation */
	hose->io_base_virt = (void __iomem *)(area->addr +
					      hose->io_base_phys - phys_page);

	DBG("IO mapping for PHB %s\n", hose->dn->full_name);
	DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
	    hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
	DBG(" size=0x%016lx (alloc=0x%016lx)\n",
	    hose->pci_io_size, size_page);

	/* Establish the mapping */
	if (__ioremap_at(phys_page, area->addr, size_page,
			 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
		return -ENOMEM;

	/* Fixup hose IO resource */
	io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	hose->io_resource.start += io_virt_offset;
	hose->io_resource.end += io_virt_offset;

	DBG(" hose->io_resource=0x%016lx...0x%016lx\n",
	    hose->io_resource.start, hose->io_resource.end);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);
553
/* Per-device archdata setup for a newly discovered PCI device: attach
 * its OF node, the global DMA ops and a NUMA node, then give the
 * platform a chance to override via its pci_dma_dev_setup hook.
 */
void __devinit pcibios_setup_new_device(struct pci_dev *dev)
{
	struct dev_archdata *sd = &dev->dev.archdata;

	sd->of_node = pci_device_to_OF_node(dev);

	DBG("PCI: device %s OF node: %s\n", pci_name(dev),
	    sd->of_node ? sd->of_node->full_name : "<none>");

	/* Default to the globally installed PCI DMA ops; the platform
	 * hook below may replace them */
	sd->dma_ops = pci_dma_ops;
#ifdef CONFIG_NUMA
	sd->numa_node = pcibus_to_node(dev->bus);
#else
	sd->numa_node = -1;
#endif
	if (ppc_md.pci_dma_dev_setup)
		ppc_md.pci_dma_dev_setup(dev);
}
EXPORT_SYMBOL(pcibios_setup_new_device);
573
574 void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
575 {
576 struct pci_dev *dev;
577
578 if (ppc_md.pci_dma_bus_setup)
579 ppc_md.pci_dma_bus_setup(bus);
580
581 list_for_each_entry(dev, &bus->devices, bus_list)
582 pcibios_setup_new_device(dev);
583 }
584
585 unsigned long pci_address_to_pio(phys_addr_t address)
586 {
587 struct pci_controller *hose, *tmp;
588
589 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
590 if (address >= hose->io_base_phys &&
591 address < (hose->io_base_phys + hose->pci_io_size)) {
592 unsigned long base =
593 (unsigned long)hose->io_base_virt - _IO_BASE;
594 return base + (address - hose->io_base_phys);
595 }
596 }
597 return (unsigned int)-1;
598 }
599 EXPORT_SYMBOL_GPL(pci_address_to_pio);
600
601
/* 'which' selector values for the sys_pciconfig_iobase syscall */
#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2
#define IOBASE_ISA_IO		3
#define IOBASE_ISA_MEM		4

/* Legacy syscall (used by X servers, see the G5 hack below) returning
 * various base values of the host bridge that owns bus @in_bus.
 * @in_devfn is accepted but unused.  Returns the requested value or a
 * negative errno.
 */
long sys_pciconfig_iobase(long which, unsigned long in_bus,
			  unsigned long in_devfn)
{
	struct pci_controller* hose;
	struct list_head *ln;
	struct pci_bus *bus = NULL;
	struct device_node *hose_node;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lockup on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
	if (machine_is_compatible("MacRISC4"))
		if (in_bus == 0)
			in_bus = 0xf0;

	/* That syscall isn't quite compatible with PCI domains, but it's
	 * used on pre-domains setup. We return the first match
	 */

	/* Find the first root bus whose range covers in_bus */
	for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
		bus = pci_bus_b(ln);
		if (in_bus >= bus->number && in_bus <= bus->subordinate)
			break;
		bus = NULL;
	}
	if (bus == NULL || bus->sysdata == NULL)
		return -ENODEV;

	/* Root bus sysdata is the PHB's device node (see scan_phb()) */
	hose_node = (struct device_node *)bus->sysdata;
	hose = PCI_DN(hose_node)->phb;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return -EINVAL;
	}

	return -EOPNOTSUPP;
}
656
#ifdef CONFIG_NUMA
/* Return the NUMA node of the host bridge that owns @bus */
int pcibus_to_node(struct pci_bus *bus)
{
	struct pci_controller *phb = pci_bus_to_host(bus);
	return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
This page took 0.045992 seconds and 6 git commands to generate.