[POWERPC] Merge PCI resource fixups
arch/powerpc/kernel/pci_64.c
/*
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *    Rework, based on alpha PCI code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

unsigned long pci_probe_only = 1;

/* pci_io_base -- the base address from which io bars are offsets.
 * This is the lowest I/O base address (so bar values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard coded offsets.  If no ISA bus exists nothing
 * is mapped on the first 64K of IO space
 */
unsigned long pci_io_base = ISA_IO_BASE;
EXPORT_SYMBOL(pci_io_base);

LIST_HEAD(hose_list);

static struct dma_mapping_ops *pci_dma_ops;

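/*
 * DMA operations used for all PCI devices on this platform.  Platform code
 * installs its dma_mapping_ops here via set_pci_dma_ops();
 * pcibios_setup_new_device() later copies the pointer into each new
 * device's archdata so the generic DMA API dispatches through it.
 */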
void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
        pci_dma_ops = dma_ops;
}

struct dma_mapping_ops *get_pci_dma_ops(void)
{
        return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);


int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
        return dma_set_mask(&dev->dev, mask);
}

int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
        int rc;

        rc = dma_set_mask(&dev->dev, mask);
        dev->dev.coherent_dma_mask = dev->dma_mask;

        return rc;
}

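/*
 * Quirk: some pcnet32-compatible Ethernet adapters apparently come up
 * reporting Trident's vendor ID in config space.  Rewrite it to AMD so
 * the pcnet32 driver will match the device.
 */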
static void fixup_broken_pcnet32(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET) {
                dev->vendor = PCI_VENDOR_ID_AMD;
                pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address.  The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's OK to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff..
 */
void pcibios_align_resource(void *data, struct resource *res,
                            resource_size_t size, resource_size_t align)
{
        struct pci_dev *dev = data;
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        resource_size_t start = res->start;
        unsigned long alignto;

        if (res->flags & IORESOURCE_IO) {
                unsigned long offset = (unsigned long)hose->io_base_virt -
                                        _IO_BASE;
                /* Make sure we start at our min on all hoses */
                if (start - offset < PCIBIOS_MIN_IO)
                        start = PCIBIOS_MIN_IO + offset;

                /*
                 * Put everything into 0x00-0xff region modulo 0x400
                 */
                if (start & 0x300)
                        start = (start + 0x3ff) & ~0x3ff;

        } else if (res->flags & IORESOURCE_MEM) {
                /* Make sure we start at our min on all hoses */
                if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
                        start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;

                /* Align to multiple of size of minimum base. */
                alignto = max(0x1000UL, align);
                start = ALIGN(start, alignto);
        }

        res->start = start;
}

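/*
 * Claim every resource that firmware has already assigned on this bus
 * (and, recursively, on its child buses) into the resource tree, so the
 * generic code won't reassign it.  Used when pci_probe_only is set.
 */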
void __devinit pcibios_claim_one_bus(struct pci_bus *b)
{
        struct pci_dev *dev;
        struct pci_bus *child_bus;

        list_for_each_entry(dev, &b->devices, bus_list) {
                int i;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        struct resource *r = &dev->resource[i];

                        if (r->parent || !r->start || !r->flags)
                                continue;
                        pci_claim_resource(dev, i);
                }
        }

        list_for_each_entry(child_bus, &b->children, node)
                pcibios_claim_one_bus(child_bus);
}
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
#endif

static void __init pcibios_claim_of_setup(void)
{
        struct pci_bus *b;

        list_for_each_entry(b, &pci_root_buses, node)
                pcibios_claim_one_bus(b);
}

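/*
 * Read a 32-bit integer property from the device tree, falling back to
 * @def when the property is missing or too short.
 */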
static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
        const u32 *prop;
        int len;

        prop = of_get_property(np, name, &len);
        if (prop && len >= 4)
                return *prop;
        return def;
}

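/*
 * Translate the phys.hi cell of an Open Firmware PCI address into
 * IORESOURCE_* and PCI_BASE_ADDRESS_* flags: bits 24-25 select the
 * address space (I/O, 32-bit or 64-bit memory) and bit 30 marks a
 * prefetchable range.  Config-space entries yield 0.
 */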
static unsigned int pci_parse_of_flags(u32 addr0)
{
        unsigned int flags = 0;

        if (addr0 & 0x02000000) {
                flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
                flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
                flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
                if (addr0 & 0x40000000)
                        flags |= IORESOURCE_PREFETCH
                                 | PCI_BASE_ADDRESS_MEM_PREFETCH;
        } else if (addr0 & 0x01000000)
                flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
        return flags;
}

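/*
 * Fill in dev->resource[] from the "assigned-addresses" property rather
 * than by probing the BARs.  Each entry is five cells (20 bytes):
 * phys.hi, phys.mid, phys.lo, size.hi, size.lo; the low byte of phys.hi
 * is the config-space register the assignment belongs to.
 */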
static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
{
        u64 base, size;
        unsigned int flags;
        struct resource *res;
        const u32 *addrs;
        u32 i;
        int proplen;

        addrs = of_get_property(node, "assigned-addresses", &proplen);
        if (!addrs)
                return;
        DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
        for (; proplen >= 20; proplen -= 20, addrs += 5) {
                flags = pci_parse_of_flags(addrs[0]);
                if (!flags)
                        continue;
                base = of_read_number(&addrs[1], 2);
                size = of_read_number(&addrs[3], 2);
                if (!size)
                        continue;
                i = addrs[0] & 0xff;
                DBG(" base: %llx, size: %llx, i: %x\n",
                    (unsigned long long)base, (unsigned long long)size, i);

                if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
                        res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
                } else if (i == dev->rom_base_reg) {
                        res = &dev->resource[PCI_ROM_RESOURCE];
                        flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
                } else {
                        printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
                        continue;
                }
                res->start = base;
                res->end = base + size - 1;
                res->flags = flags;
                res->name = pci_name(dev);
        }
}

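/*
 * Build a pci_dev entirely from device-tree information instead of
 * reading config space; used when the platform selects PCI_PROBE_DEVTREE
 * probing.
 */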
struct pci_dev *of_create_pci_dev(struct device_node *node,
                                  struct pci_bus *bus, int devfn)
{
        struct pci_dev *dev;
        const char *type;

        dev = alloc_pci_dev();
        if (!dev)
                return NULL;
        type = of_get_property(node, "device_type", NULL);
        if (type == NULL)
                type = "";

        DBG(" create device, devfn: %x, type: %s\n", devfn, type);

        dev->bus = bus;
        dev->sysdata = node;
        dev->dev.parent = bus->bridge;
        dev->dev.bus = &pci_bus_type;
        dev->devfn = devfn;
        dev->multifunction = 0;         /* maybe a lie? */

        dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
        dev->device = get_int_prop(node, "device-id", 0xffff);
        dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
        dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);

        dev->cfg_size = pci_cfg_space_size(dev);

        sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
                dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
        dev->class = get_int_prop(node, "class-code", 0);
        dev->revision = get_int_prop(node, "revision-id", 0);

        DBG(" class: 0x%x\n", dev->class);
        DBG(" revision: 0x%x\n", dev->revision);

        dev->current_state = 4;         /* unknown power state */
        dev->error_state = pci_channel_io_normal;
        dev->dma_mask = 0xffffffff;

        if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
                /* a PCI-PCI bridge */
                dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
                dev->rom_base_reg = PCI_ROM_ADDRESS1;
        } else if (!strcmp(type, "cardbus")) {
                dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
        } else {
                dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
                dev->rom_base_reg = PCI_ROM_ADDRESS;
                /* Maybe do a default OF mapping here */
                dev->irq = NO_IRQ;
        }

        pci_parse_of_addrs(node, dev);

        DBG(" adding to system ...\n");

        pci_device_add(dev, bus);

        return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);

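/*
 * Scan a bus from the device tree: create a pci_dev for every child
 * node that carries a "reg" property, run the bus fixups, then recurse
 * into any PCI-PCI or CardBus bridges that were found.
 */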
void __devinit of_scan_bus(struct device_node *node,
                           struct pci_bus *bus)
{
        struct device_node *child = NULL;
        const u32 *reg;
        int reglen, devfn;
        struct pci_dev *dev;

        DBG("of_scan_bus(%s) bus no %d...\n", node->full_name, bus->number);

        /* Scan direct children */
        while ((child = of_get_next_child(node, child)) != NULL) {
                DBG(" * %s\n", child->full_name);
                reg = of_get_property(child, "reg", &reglen);
                if (reg == NULL || reglen < 20)
                        continue;
                devfn = (reg[0] >> 8) & 0xff;

                /* create a new pci_dev for this device */
                dev = of_create_pci_dev(child, bus, devfn);
                if (!dev)
                        continue;
                DBG(" dev header type: %x\n", dev->hdr_type);
        }

        /* Apply all fixups */
        pcibios_fixup_of_probed_bus(bus);

        /* Now scan child busses */
        list_for_each_entry(dev, &bus->devices, bus_list) {
                if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
                    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
                        struct device_node *child = pci_device_to_OF_node(dev);
                        if (child)
                                of_scan_pci_bridge(child, dev);
                }
        }
}
EXPORT_SYMBOL(of_scan_bus);

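/*
 * Create and scan the secondary bus of a PCI-PCI bridge described in the
 * device tree.  The bridge's "ranges" property is translated into the
 * bus window resources (one I/O window plus memory windows), and the bus
 * behind it is then probed either from the tree or with config cycles,
 * depending on ppc_md.pci_probe_mode.
 */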
void __devinit of_scan_pci_bridge(struct device_node *node,
                                  struct pci_dev *dev)
{
        struct pci_bus *bus;
        const u32 *busrange, *ranges;
        int len, i, mode;
        struct resource *res;
        unsigned int flags;
        u64 size;

        DBG("of_scan_pci_bridge(%s)\n", node->full_name);

        /* parse bus-range property */
        busrange = of_get_property(node, "bus-range", &len);
        if (busrange == NULL || len != 8) {
                printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
                       node->full_name);
                return;
        }
        ranges = of_get_property(node, "ranges", &len);
        if (ranges == NULL) {
                printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
                       node->full_name);
                return;
        }

        bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
        if (!bus) {
                printk(KERN_ERR "Failed to create pci bus for %s\n",
                       node->full_name);
                return;
        }

        bus->primary = dev->bus->number;
        bus->subordinate = busrange[1];
        bus->bridge_ctl = 0;
        bus->sysdata = node;

        /* parse ranges property */
        /* PCI #address-cells == 3 and #size-cells == 2 always */
        res = &dev->resource[PCI_BRIDGE_RESOURCES];
        for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
                res->flags = 0;
                bus->resource[i] = res;
                ++res;
        }
        i = 1;
        for (; len >= 32; len -= 32, ranges += 8) {
                flags = pci_parse_of_flags(ranges[0]);
                size = of_read_number(&ranges[6], 2);
                if (flags == 0 || size == 0)
                        continue;
                if (flags & IORESOURCE_IO) {
                        res = bus->resource[0];
                        if (res->flags) {
                                printk(KERN_ERR "PCI: ignoring extra I/O range"
                                       " for bridge %s\n", node->full_name);
                                continue;
                        }
                } else {
                        if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
                                printk(KERN_ERR "PCI: too many memory ranges"
                                       " for bridge %s\n", node->full_name);
                                continue;
                        }
                        res = bus->resource[i];
                        ++i;
                }
                res->start = of_read_number(&ranges[1], 2);
                res->end = res->start + size - 1;
                res->flags = flags;
        }
        sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
                bus->number);
        DBG(" bus name: %s\n", bus->name);

        mode = PCI_PROBE_NORMAL;
        if (ppc_md.pci_probe_mode)
                mode = ppc_md.pci_probe_mode(bus);
        DBG(" probe mode: %d\n", mode);

        if (mode == PCI_PROBE_DEVTREE)
                of_scan_bus(node, bus);
        else if (mode == PCI_PROBE_NORMAL)
                pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);

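/*
 * Scan one PCI host bridge (PHB): create its root bus, map its I/O
 * aperture into the kernel's I/O window, claim the hose's I/O and memory
 * resources, then probe the bus either from the device tree or with
 * normal config cycles, as requested by ppc_md.pci_probe_mode.
 */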
void __devinit scan_phb(struct pci_controller *hose)
{
        struct pci_bus *bus;
        struct device_node *node = hose->dn;
        int i, mode;
        struct resource *res;

        DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");

        bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
        if (bus == NULL) {
                printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
                       hose->global_number);
                return;
        }
        bus->secondary = hose->first_busno;
        hose->bus = bus;

        pcibios_map_io_space(bus);

        bus->resource[0] = res = &hose->io_resource;
        if (res->flags && request_resource(&ioport_resource, res)) {
                printk(KERN_ERR "Failed to request PCI IO region "
                       "on PCI domain %04x\n", hose->global_number);
                DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
                    res->start, res->end);
        }

        for (i = 0; i < 3; ++i) {
                res = &hose->mem_resources[i];
                bus->resource[i+1] = res;
                if (res->flags && request_resource(&iomem_resource, res))
                        printk(KERN_ERR "Failed to request PCI memory region "
                               "on PCI domain %04x\n", hose->global_number);
        }

        mode = PCI_PROBE_NORMAL;

        if (node && ppc_md.pci_probe_mode)
                mode = ppc_md.pci_probe_mode(bus);
        DBG(" probe mode: %d\n", mode);
        if (mode == PCI_PROBE_DEVTREE) {
                bus->subordinate = hose->last_busno;
                of_scan_bus(node, bus);
        }

        if (mode == PCI_PROBE_NORMAL)
                hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}

static int __init pcibios_init(void)
{
        struct pci_controller *hose, *tmp;

        /* For now, override phys_mem_access_prot.  If we need to, we may
         * later move that initialization into each ppc_md.
         */
        ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

        printk(KERN_DEBUG "PCI: Probing PCI hardware\n");

        /* Scan all of the recorded PCI controllers. */
        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                scan_phb(hose);
                pci_bus_add_devices(hose->bus);
        }

        if (pci_probe_only)
                pcibios_claim_of_setup();
        else
                /* FIXME: `else' will be removed when
                   pci_assign_unassigned_resources() is able to work
                   correctly with a [partially] allocated PCI tree. */
                pci_assign_unassigned_resources();

        /* Call machine dependent final fixup */
        if (ppc_md.pcibios_fixup)
                ppc_md.pcibios_fixup();

        printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");

        return 0;
}

subsys_initcall(pcibios_init);

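/*
 * Turn on I/O and/or memory decoding in the device's PCI_COMMAND
 * register for the resources selected by @mask, writing the register
 * only if something actually changed.
 */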
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
        u16 cmd, oldcmd;
        int i;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        oldcmd = cmd;

        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                struct resource *res = &dev->resource[i];

                /* Only set up the requested stuff */
                if (!(mask & (1 << i)))
                        continue;

                if (res->flags & IORESOURCE_IO)
                        cmd |= PCI_COMMAND_IO;
                if (res->flags & IORESOURCE_MEM)
                        cmd |= PCI_COMMAND_MEMORY;
        }

        if (cmd != oldcmd) {
                printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
                       pci_name(dev), cmd);
                /* Enable the appropriate bits in the PCI command register. */
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
        return 0;
}

#ifdef CONFIG_HOTPLUG

int pcibios_unmap_io_space(struct pci_bus *bus)
{
        struct pci_controller *hose;

        WARN_ON(bus == NULL);

        /* If this is not a PHB, we only flush the hash table over
         * the area mapped by this bridge.  We don't play with the PTE
         * mappings since we might have to deal with sub-page alignments
         * so flushing the hash table is the only sane way to make sure
         * that no hash entries are covering that removed bridge area
         * while still allowing other busses overlapping those pages
         */
        if (bus->self) {
                struct resource *res = bus->resource[0];

                DBG("IO unmapping for PCI-PCI bridge %s\n",
                    pci_name(bus->self));

                __flush_hash_table_range(&init_mm, res->start + _IO_BASE,
                                         res->end + _IO_BASE + 1);
                return 0;
        }

        /* Get the host bridge */
        hose = pci_bus_to_host(bus);

        /* Check if we have IOs allocated */
        if (hose->io_base_alloc == 0)
                return 0;

        DBG("IO unmapping for PHB %s\n", hose->dn->full_name);
        DBG(" alloc=0x%p\n", hose->io_base_alloc);

        /* This is a PHB, we fully unmap the IO area */
        vunmap(hose->io_base_alloc);

        return 0;
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);

#endif /* CONFIG_HOTPLUG */

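/*
 * Map a host bridge's I/O aperture into the kernel's PHB_IO_BASE..
 * PHB_IO_END virtual window and shift the hose's I/O resource by the
 * resulting virtual offset.  For ordinary PCI-PCI bridges there is
 * nothing to map; the PHB mapping already covers them.
 */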
int __devinit pcibios_map_io_space(struct pci_bus *bus)
{
        struct vm_struct *area;
        unsigned long phys_page;
        unsigned long size_page;
        unsigned long io_virt_offset;
        struct pci_controller *hose;

        WARN_ON(bus == NULL);

        /* If this is not a PHB, nothing to do, page tables still exist and
         * thus HPTEs will be faulted in when needed
         */
        if (bus->self) {
                DBG("IO mapping for PCI-PCI bridge %s\n",
                    pci_name(bus->self));
                DBG(" virt=0x%016lx...0x%016lx\n",
                    bus->resource[0]->start + _IO_BASE,
                    bus->resource[0]->end + _IO_BASE);
                return 0;
        }

        /* Get the host bridge */
        hose = pci_bus_to_host(bus);
        phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
        size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);

        /* Make sure IO area address is clear */
        hose->io_base_alloc = NULL;

        /* If there's no IO to map on that bus, get away too */
        if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
                return 0;

        /* Let's allocate some IO space for that guy.  We don't pass
         * VM_IOREMAP because we don't care about alignment tricks that
         * the core does in that case.  Maybe we should, because of stupid
         * cards with incomplete address decoding, but I'd rather not deal
         * with those outside of the reserved 64K legacy region.
         */
        area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
        if (area == NULL)
                return -ENOMEM;
        hose->io_base_alloc = area->addr;
        hose->io_base_virt = (void __iomem *)(area->addr +
                                              hose->io_base_phys - phys_page);

        DBG("IO mapping for PHB %s\n", hose->dn->full_name);
        DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
            hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
        DBG(" size=0x%016lx (alloc=0x%016lx)\n",
            hose->pci_io_size, size_page);

        /* Establish the mapping */
        if (__ioremap_at(phys_page, area->addr, size_page,
                         _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
                return -ENOMEM;

        /* Fixup hose IO resource */
        io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
        hose->io_resource.start += io_virt_offset;
        hose->io_resource.end += io_virt_offset;

        DBG(" hose->io_resource=0x%016lx...0x%016lx\n",
            hose->io_resource.start, hose->io_resource.end);

        return 0;
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);

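/*
 * Attach the architecture-specific bits (OF node, DMA operations, NUMA
 * node) to a newly created pci_dev and give the platform a chance to
 * finish its DMA setup.
 */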
void __devinit pcibios_setup_new_device(struct pci_dev *dev)
{
        struct dev_archdata *sd = &dev->dev.archdata;

        sd->of_node = pci_device_to_OF_node(dev);

        DBG("PCI: device %s OF node: %s\n", pci_name(dev),
            sd->of_node ? sd->of_node->full_name : "<none>");

        sd->dma_ops = pci_dma_ops;
#ifdef CONFIG_NUMA
        sd->numa_node = pcibus_to_node(dev->bus);
#else
        sd->numa_node = -1;
#endif
        if (ppc_md.pci_dma_dev_setup)
                ppc_md.pci_dma_dev_setup(dev);
}
EXPORT_SYMBOL(pcibios_setup_new_device);

void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
{
        struct pci_dev *dev;

        if (ppc_md.pci_dma_bus_setup)
                ppc_md.pci_dma_bus_setup(bus);

        list_for_each_entry(dev, &bus->devices, bus_list)
                pcibios_setup_new_device(dev);
}

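/*
 * Translate a CPU physical address inside some hose's I/O aperture into
 * the port number (offset from _IO_BASE) used by inb()/outb().  Returns
 * (unsigned int)-1 if no host bridge claims the address.
 */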
unsigned long pci_address_to_pio(phys_addr_t address)
{
        struct pci_controller *hose, *tmp;

        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                if (address >= hose->io_base_phys &&
                    address < (hose->io_base_phys + hose->pci_io_size)) {
                        unsigned long base =
                                (unsigned long)hose->io_base_virt - _IO_BASE;
                        return base + (address - hose->io_base_phys);
                }
        }
        return (unsigned int)-1;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);


#define IOBASE_BRIDGE_NUMBER    0
#define IOBASE_MEMORY           1
#define IOBASE_IO               2
#define IOBASE_ISA_IO           3
#define IOBASE_ISA_MEM          4

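/*
 * Legacy syscall, used mainly by X servers: given a bus number, report
 * the host bridge's first bus number, PCI memory offset or I/O base,
 * selected by @which (one of the IOBASE_* values above).
 */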
long sys_pciconfig_iobase(long which, unsigned long in_bus,
                          unsigned long in_devfn)
{
        struct pci_controller *hose;
        struct list_head *ln;
        struct pci_bus *bus = NULL;
        struct device_node *hose_node;

        /* Argh ! Please forgive me for that hack, but that's the
         * simplest way to get existing XFree to not lockup on some
         * G5 machines... So when something asks for bus 0 io base
         * (bus 0 is HT root), we return the AGP one instead.
         */
        if (machine_is_compatible("MacRISC4"))
                if (in_bus == 0)
                        in_bus = 0xf0;

        /* That syscall isn't quite compatible with PCI domains, but it's
         * used on pre-domains setups.  We return the first match.
         */

        for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
                bus = pci_bus_b(ln);
                if (in_bus >= bus->number && in_bus <= bus->subordinate)
                        break;
                bus = NULL;
        }
        if (bus == NULL || bus->sysdata == NULL)
                return -ENODEV;

        hose_node = (struct device_node *)bus->sysdata;
        hose = PCI_DN(hose_node)->phb;

        switch (which) {
        case IOBASE_BRIDGE_NUMBER:
                return (long)hose->first_busno;
        case IOBASE_MEMORY:
                return (long)hose->pci_mem_offset;
        case IOBASE_IO:
                return (long)hose->io_base_phys;
        case IOBASE_ISA_IO:
                return (long)isa_io_base;
        case IOBASE_ISA_MEM:
                return -EINVAL;
        }

        return -EOPNOTSUPP;
}

#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
        struct pci_controller *phb = pci_bus_to_host(bus);
        return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif