[POWERPC] Don't special case pci_domain_nr() for iSeries
1 /*
2 * Port for PPC64 David Engebretsen, IBM Corp.
3 * Contains common PCI routines for the ppc64 platform, pSeries and iSeries brands.
4 *
5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
6 * Rework, based on alpha PCI code.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 #undef DEBUG
15
16 #include <linux/kernel.h>
17 #include <linux/pci.h>
18 #include <linux/string.h>
19 #include <linux/init.h>
20 #include <linux/bootmem.h>
21 #include <linux/mm.h>
22 #include <linux/list.h>
23 #include <linux/syscalls.h>
24 #include <linux/irq.h>
25 #include <linux/vmalloc.h>
26
27 #include <asm/processor.h>
28 #include <asm/io.h>
29 #include <asm/prom.h>
30 #include <asm/pci-bridge.h>
31 #include <asm/byteorder.h>
32 #include <asm/machdep.h>
33 #include <asm/ppc-pci.h>
34
35 #ifdef DEBUG
36 #include <asm/udbg.h>
37 #define DBG(fmt...) printk(fmt)
38 #else
39 #define DBG(fmt...)
40 #endif
41
42 unsigned long pci_probe_only = 1;
43 int pci_assign_all_buses = 0;
44
45 static void fixup_resource(struct resource *res, struct pci_dev *dev);
46 static void do_bus_setup(struct pci_bus *bus);
47
48 /* pci_io_base -- the base address from which I/O BARs are offsets.
49 * This is the lowest I/O base address (so BAR values are always positive),
50 * and it *must* be the start of ISA space if an ISA bus exists, because
51 * ISA drivers use hard-coded offsets. If no ISA bus exists, nothing
52 * is mapped in the first 64K of I/O space.
53 */
54 unsigned long pci_io_base = ISA_IO_BASE;
55 EXPORT_SYMBOL(pci_io_base);
56
57 LIST_HEAD(hose_list);
58
59 static struct dma_mapping_ops *pci_dma_ops;
60
61 void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
62 {
63 pci_dma_ops = dma_ops;
64 }
65
66 struct dma_mapping_ops *get_pci_dma_ops(void)
67 {
68 return pci_dma_ops;
69 }
70 EXPORT_SYMBOL(get_pci_dma_ops);
71
72 static void fixup_broken_pcnet32(struct pci_dev* dev)
73 {
74 	if ((dev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET) {
75 dev->vendor = PCI_VENDOR_ID_AMD;
76 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
77 }
78 }
79 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
80
81 void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
82 struct resource *res)
83 {
84 unsigned long offset = 0;
85 struct pci_controller *hose = pci_bus_to_host(dev->bus);
86
87 if (!hose)
88 return;
89
90 if (res->flags & IORESOURCE_IO)
91 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
92
93 if (res->flags & IORESOURCE_MEM)
94 offset = hose->pci_mem_offset;
95
96 region->start = res->start - offset;
97 region->end = res->end - offset;
98 }
99
100 void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
101 struct pci_bus_region *region)
102 {
103 unsigned long offset = 0;
104 struct pci_controller *hose = pci_bus_to_host(dev->bus);
105
106 if (!hose)
107 return;
108
109 if (res->flags & IORESOURCE_IO)
110 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
111
112 if (res->flags & IORESOURCE_MEM)
113 offset = hose->pci_mem_offset;
114
115 res->start = region->start + offset;
116 res->end = region->end + offset;
117 }
118
119 #ifdef CONFIG_HOTPLUG
120 EXPORT_SYMBOL(pcibios_resource_to_bus);
121 EXPORT_SYMBOL(pcibios_bus_to_resource);
122 #endif
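/*
 * Illustrative example (values are hypothetical, not from any real
 * device tree): on a hose with pci_mem_offset = 0x100000000, a memory
 * resource spanning 0x180000000..0x1800fffff on the CPU side maps to
 * the bus region 0x80000000..0x800fffff, since region->start =
 * res->start - pci_mem_offset. For I/O resources the offset is
 * io_base_virt - _IO_BASE instead, so bus-side port numbers stay small
 * even though the kernel-virtual I/O window lives high up.
 */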
123
124 /*
125 * We need to avoid collisions with `mirrored' VGA ports
126 * and other strange ISA hardware, so we always want the
127 * addresses to be allocated in the 0x000-0x0ff region
128 * modulo 0x400.
129 *
130 * Why? Because some silly external IO cards only decode
131 * the low 10 bits of the IO address. The 0x00-0xff region
132 * is reserved for motherboard devices that decode all 16
133 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
134 * but we want to try to avoid allocating at 0x2900-0x2bff
135 * which might be mirrored at 0x0100-0x03ff.
136 */
137 void pcibios_align_resource(void *data, struct resource *res,
138 resource_size_t size, resource_size_t align)
139 {
140 struct pci_dev *dev = data;
141 struct pci_controller *hose = pci_bus_to_host(dev->bus);
142 resource_size_t start = res->start;
143 unsigned long alignto;
144
145 if (res->flags & IORESOURCE_IO) {
146 unsigned long offset = (unsigned long)hose->io_base_virt -
147 _IO_BASE;
148 /* Make sure we start at our min on all hoses */
149 if (start - offset < PCIBIOS_MIN_IO)
150 start = PCIBIOS_MIN_IO + offset;
151
152 /*
153 * Put everything into 0x00-0xff region modulo 0x400
154 */
155 if (start & 0x300)
156 start = (start + 0x3ff) & ~0x3ff;
157
158 } else if (res->flags & IORESOURCE_MEM) {
159 /* Make sure we start at our min on all hoses */
160 if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
161 start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;
162
163 /* Align to multiple of size of minimum base. */
164 alignto = max(0x1000UL, align);
165 start = ALIGN(start, alignto);
166 }
167
168 res->start = start;
169 }
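/*
 * Worked example of the modulo-0x400 rule above (hypothetical start):
 * an I/O request at start = 0x2910 has (start & 0x300) != 0, so it is
 * rounded up to (0x2910 + 0x3ff) & ~0x3ff = 0x2c00, which again falls
 * in the safe 0x00-0xff region modulo 0x400. A start of 0x2820 would
 * be left alone.
 */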
170
171 void __devinit pcibios_claim_one_bus(struct pci_bus *b)
172 {
173 struct pci_dev *dev;
174 struct pci_bus *child_bus;
175
176 list_for_each_entry(dev, &b->devices, bus_list) {
177 int i;
178
179 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
180 struct resource *r = &dev->resource[i];
181
182 if (r->parent || !r->start || !r->flags)
183 continue;
184 pci_claim_resource(dev, i);
185 }
186 }
187
188 list_for_each_entry(child_bus, &b->children, node)
189 pcibios_claim_one_bus(child_bus);
190 }
191 #ifdef CONFIG_HOTPLUG
192 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
193 #endif
194
195 static void __init pcibios_claim_of_setup(void)
196 {
197 struct pci_bus *b;
198
199 list_for_each_entry(b, &pci_root_buses, node)
200 pcibios_claim_one_bus(b);
201 }
202
203 static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
204 {
205 const u32 *prop;
206 int len;
207
208 prop = of_get_property(np, name, &len);
209 if (prop && len >= 4)
210 return *prop;
211 return def;
212 }
213
214 static unsigned int pci_parse_of_flags(u32 addr0)
215 {
216 unsigned int flags = 0;
217
218 if (addr0 & 0x02000000) {
219 flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
220 flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
221 flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
222 if (addr0 & 0x40000000)
223 flags |= IORESOURCE_PREFETCH
224 | PCI_BASE_ADDRESS_MEM_PREFETCH;
225 } else if (addr0 & 0x01000000)
226 flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
227 return flags;
228 }
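/*
 * The cell decoded above is phys.hi of an IEEE 1275 PCI address:
 * npt000ss bbbbbbbb dddddfff rrrrrrrr. Illustrative values: 0x82000010
 * decodes as a 32-bit non-prefetchable memory range for config
 * register 0x10 (BAR 0); 0x81000014 is an I/O range for register 0x14;
 * bit 30 (0x40000000) marks a memory range prefetchable and ss = 11
 * (0x03000000) marks it 64-bit.
 */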
229
230
231 static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
232 {
233 u64 base, size;
234 unsigned int flags;
235 struct resource *res;
236 const u32 *addrs;
237 u32 i;
238 int proplen;
239
240 addrs = of_get_property(node, "assigned-addresses", &proplen);
241 if (!addrs)
242 return;
243 DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
244 for (; proplen >= 20; proplen -= 20, addrs += 5) {
245 flags = pci_parse_of_flags(addrs[0]);
246 if (!flags)
247 continue;
248 base = of_read_number(&addrs[1], 2);
249 size = of_read_number(&addrs[3], 2);
250 if (!size)
251 continue;
252 i = addrs[0] & 0xff;
253 DBG(" base: %llx, size: %llx, i: %x\n",
254 (unsigned long long)base, (unsigned long long)size, i);
255
256 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
257 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
258 } else if (i == dev->rom_base_reg) {
259 res = &dev->resource[PCI_ROM_RESOURCE];
260 flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
261 } else {
262 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
263 continue;
264 }
265 res->start = base;
266 res->end = base + size - 1;
267 res->flags = flags;
268 res->name = pci_name(dev);
269 fixup_resource(res, dev);
270 }
271 }
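/*
 * "assigned-addresses" holds 5 cells (20 bytes) per entry: phys.hi
 * (flags plus the config register number), a 64-bit assigned bus
 * address in phys.mid/phys.lo, and a 64-bit size. A hypothetical entry
 * { 0x82000010, 0x0, 0xa0000000, 0x0, 0x00100000 } therefore becomes a
 * 1MB, 32-bit memory window at bus address 0xa0000000, recorded in
 * dev->resource[0] and then rebased to CPU space by fixup_resource().
 */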
272
273 struct pci_dev *of_create_pci_dev(struct device_node *node,
274 struct pci_bus *bus, int devfn)
275 {
276 struct pci_dev *dev;
277 const char *type;
278
279 dev = alloc_pci_dev();
280 if (!dev)
281 return NULL;
282 type = of_get_property(node, "device_type", NULL);
283 if (type == NULL)
284 type = "";
285
286 DBG(" create device, devfn: %x, type: %s\n", devfn, type);
287
288 dev->bus = bus;
289 dev->sysdata = node;
290 dev->dev.parent = bus->bridge;
291 dev->dev.bus = &pci_bus_type;
292 dev->devfn = devfn;
293 dev->multifunction = 0; /* maybe a lie? */
294
295 dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
296 dev->device = get_int_prop(node, "device-id", 0xffff);
297 dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
298 dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
299
300 dev->cfg_size = pci_cfg_space_size(dev);
301
302 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
303 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
304 dev->class = get_int_prop(node, "class-code", 0);
305 dev->revision = get_int_prop(node, "revision-id", 0);
306
307 DBG(" class: 0x%x\n", dev->class);
308 DBG(" revision: 0x%x\n", dev->revision);
309
310 dev->current_state = 4; /* unknown power state */
311 dev->error_state = pci_channel_io_normal;
312 dev->dma_mask = 0xffffffff;
313
314 if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
315 /* a PCI-PCI bridge */
316 dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
317 dev->rom_base_reg = PCI_ROM_ADDRESS1;
318 } else if (!strcmp(type, "cardbus")) {
319 dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
320 } else {
321 dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
322 dev->rom_base_reg = PCI_ROM_ADDRESS;
323 /* Maybe do a default OF mapping here */
324 dev->irq = NO_IRQ;
325 }
326
327 pci_parse_of_addrs(node, dev);
328
329 DBG(" adding to system ...\n");
330
331 pci_device_add(dev, bus);
332
333 return dev;
334 }
335 EXPORT_SYMBOL(of_create_pci_dev);
336
337 void __devinit of_scan_bus(struct device_node *node,
338 struct pci_bus *bus)
339 {
340 struct device_node *child = NULL;
341 const u32 *reg;
342 int reglen, devfn;
343 struct pci_dev *dev;
344
345 DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);
346
347 while ((child = of_get_next_child(node, child)) != NULL) {
348 DBG(" * %s\n", child->full_name);
349 reg = of_get_property(child, "reg", &reglen);
350 if (reg == NULL || reglen < 20)
351 continue;
352 devfn = (reg[0] >> 8) & 0xff;
353
354 /* create a new pci_dev for this device */
355 dev = of_create_pci_dev(child, bus, devfn);
356 if (!dev)
357 continue;
358 DBG("dev header type: %x\n", dev->hdr_type);
359
360 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
361 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
362 of_scan_pci_bridge(child, dev);
363 }
364
365 do_bus_setup(bus);
366 }
367 EXPORT_SYMBOL(of_scan_bus);
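/*
 * The "reg" property read above starts with phys.hi, whose bits 15:8
 * encode device and function (dddddfff). Illustrative value: reg[0] =
 * 0x00002800 gives devfn = 0x28, i.e. PCI_SLOT() = 5, PCI_FUNC() = 0.
 */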
368
369 void __devinit of_scan_pci_bridge(struct device_node *node,
370 struct pci_dev *dev)
371 {
372 struct pci_bus *bus;
373 const u32 *busrange, *ranges;
374 int len, i, mode;
375 struct resource *res;
376 unsigned int flags;
377 u64 size;
378
379 DBG("of_scan_pci_bridge(%s)\n", node->full_name);
380
381 /* parse bus-range property */
382 busrange = of_get_property(node, "bus-range", &len);
383 if (busrange == NULL || len != 8) {
384 printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
385 node->full_name);
386 return;
387 }
388 ranges = of_get_property(node, "ranges", &len);
389 if (ranges == NULL) {
390 printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
391 node->full_name);
392 return;
393 }
394
395 bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
396 if (!bus) {
397 printk(KERN_ERR "Failed to create pci bus for %s\n",
398 node->full_name);
399 return;
400 }
401
402 bus->primary = dev->bus->number;
403 bus->subordinate = busrange[1];
404 bus->bridge_ctl = 0;
405 bus->sysdata = node;
406
407 /* parse ranges property */
408 /* PCI #address-cells == 3 and #size-cells == 2 always */
409 res = &dev->resource[PCI_BRIDGE_RESOURCES];
410 for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
411 res->flags = 0;
412 bus->resource[i] = res;
413 ++res;
414 }
415 i = 1;
416 for (; len >= 32; len -= 32, ranges += 8) {
417 flags = pci_parse_of_flags(ranges[0]);
418 size = of_read_number(&ranges[6], 2);
419 if (flags == 0 || size == 0)
420 continue;
421 if (flags & IORESOURCE_IO) {
422 res = bus->resource[0];
423 if (res->flags) {
424 printk(KERN_ERR "PCI: ignoring extra I/O range"
425 " for bridge %s\n", node->full_name);
426 continue;
427 }
428 } else {
429 if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
430 printk(KERN_ERR "PCI: too many memory ranges"
431 " for bridge %s\n", node->full_name);
432 continue;
433 }
434 res = bus->resource[i];
435 ++i;
436 }
437 res->start = of_read_number(&ranges[1], 2);
438 res->end = res->start + size - 1;
439 res->flags = flags;
440 fixup_resource(res, dev);
441 }
442 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
443 bus->number);
444 DBG(" bus name: %s\n", bus->name);
445
446 mode = PCI_PROBE_NORMAL;
447 if (ppc_md.pci_probe_mode)
448 mode = ppc_md.pci_probe_mode(bus);
449 DBG(" probe mode: %d\n", mode);
450
451 if (mode == PCI_PROBE_DEVTREE)
452 of_scan_bus(node, bus);
453 else if (mode == PCI_PROBE_NORMAL)
454 pci_scan_child_bus(bus);
455 }
456 EXPORT_SYMBOL(of_scan_pci_bridge);
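/*
 * For a PCI-PCI bridge, each "ranges" entry is 8 cells (32 bytes): a
 * 3-cell child-bus PCI address, a 3-cell parent-bus PCI address and a
 * 2-cell size. The loop above takes the resource flags from the child
 * phys.hi (cell 0), the 64-bit address from cells 1-2 and the size
 * from cells 6-7, then lets fixup_resource() add the hose offset.
 */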
457
458 void __devinit scan_phb(struct pci_controller *hose)
459 {
460 struct pci_bus *bus;
461 struct device_node *node = hose->arch_data;
462 int i, mode;
463 struct resource *res;
464
465 DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
466
467 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
468 if (bus == NULL) {
469 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
470 hose->global_number);
471 return;
472 }
473 bus->secondary = hose->first_busno;
474 hose->bus = bus;
475
476 pcibios_map_io_space(bus);
477
478 bus->resource[0] = res = &hose->io_resource;
479 if (res->flags && request_resource(&ioport_resource, res)) {
480 printk(KERN_ERR "Failed to request PCI IO region "
481 "on PCI domain %04x\n", hose->global_number);
482 DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
483 res->start, res->end);
484 }
485
486 for (i = 0; i < 3; ++i) {
487 res = &hose->mem_resources[i];
488 bus->resource[i+1] = res;
489 if (res->flags && request_resource(&iomem_resource, res))
490 printk(KERN_ERR "Failed to request PCI memory region "
491 "on PCI domain %04x\n", hose->global_number);
492 }
493
494 mode = PCI_PROBE_NORMAL;
495
496 if (node && ppc_md.pci_probe_mode)
497 mode = ppc_md.pci_probe_mode(bus);
498 DBG(" probe mode: %d\n", mode);
499 if (mode == PCI_PROBE_DEVTREE) {
500 bus->subordinate = hose->last_busno;
501 of_scan_bus(node, bus);
502 }
503
504 if (mode == PCI_PROBE_NORMAL)
505 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
506 }
507
508 static int __init pcibios_init(void)
509 {
510 struct pci_controller *hose, *tmp;
511
512 /* For now, override phys_mem_access_prot. If we need it,
513 * later, we may move that initialization to each ppc_md
514 */
515 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
516
517 printk(KERN_DEBUG "PCI: Probing PCI hardware\n");
518
519 /* Scan all of the recorded PCI controllers. */
520 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
521 scan_phb(hose);
522 pci_bus_add_devices(hose->bus);
523 }
524
525 if (pci_probe_only)
526 pcibios_claim_of_setup();
527 else
528 /* FIXME: `else' will be removed when
529 pci_assign_unassigned_resources() is able to work
530 correctly with [partially] allocated PCI tree. */
531 pci_assign_unassigned_resources();
532
533 /* Call machine dependent final fixup */
534 if (ppc_md.pcibios_fixup)
535 ppc_md.pcibios_fixup();
536
537 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
538
539 return 0;
540 }
541
542 subsys_initcall(pcibios_init);
543
544 int pcibios_enable_device(struct pci_dev *dev, int mask)
545 {
546 u16 cmd, oldcmd;
547 int i;
548
549 pci_read_config_word(dev, PCI_COMMAND, &cmd);
550 oldcmd = cmd;
551
552 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
553 struct resource *res = &dev->resource[i];
554
555 /* Only set up the requested stuff */
556 if (!(mask & (1<<i)))
557 continue;
558
559 if (res->flags & IORESOURCE_IO)
560 cmd |= PCI_COMMAND_IO;
561 if (res->flags & IORESOURCE_MEM)
562 cmd |= PCI_COMMAND_MEMORY;
563 }
564
565 if (cmd != oldcmd) {
566 printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
567 pci_name(dev), cmd);
568 /* Enable the appropriate bits in the PCI command register. */
569 pci_write_config_word(dev, PCI_COMMAND, cmd);
570 }
571 return 0;
572 }
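/*
 * Example: a device whose resource 0 is an I/O BAR and resource 1 a
 * memory BAR, enabled with mask = 0x3, gets PCI_COMMAND_IO |
 * PCI_COMMAND_MEMORY merged into its command register; a mask of 0x2
 * would turn on memory decoding only.
 */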
573
574 /* Decide whether to display the domain number in /proc */
575 int pci_proc_domain(struct pci_bus *bus)
576 {
577 struct pci_controller *hose = pci_bus_to_host(bus);
578 return hose->buid != 0;
579 }
580
581 void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
582 struct device_node *dev, int prim)
583 {
584 const unsigned int *ranges;
585 unsigned int pci_space;
586 unsigned long size;
587 int rlen = 0;
588 int memno = 0;
589 struct resource *res;
590 int np, na = of_n_addr_cells(dev);
591 unsigned long pci_addr, cpu_phys_addr;
592
593 np = na + 5;
594
595 /* From "PCI Binding to 1275"
596 * The ranges property is laid out as an array of elements,
597 * each of which comprises:
598 * cells 0 - 2: a PCI address
599 * cells 3 or 3+4: a CPU physical address
600 * (size depending on dev->n_addr_cells)
601 * cells 4+5 or 5+6: the size of the range
602 */
603 ranges = of_get_property(dev, "ranges", &rlen);
604 if (ranges == NULL)
605 return;
606 hose->io_base_phys = 0;
607 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
608 res = NULL;
609 pci_space = ranges[0];
610 pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
611 cpu_phys_addr = of_translate_address(dev, &ranges[3]);
612 size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
613 ranges += np;
614 if (size == 0)
615 continue;
616
617 /* Now consume following elements while they are contiguous */
618 while (rlen >= np * sizeof(unsigned int)) {
619 unsigned long addr, phys;
620
621 if (ranges[0] != pci_space)
622 break;
623 addr = ((unsigned long)ranges[1] << 32) | ranges[2];
624 phys = ranges[3];
625 if (na >= 2)
626 phys = (phys << 32) | ranges[4];
627 if (addr != pci_addr + size ||
628 phys != cpu_phys_addr + size)
629 break;
630
631 size += ((unsigned long)ranges[na+3] << 32)
632 | ranges[na+4];
633 ranges += np;
634 rlen -= np * sizeof(unsigned int);
635 }
636
637 switch ((pci_space >> 24) & 0x3) {
638 case 1: /* I/O space */
639 hose->io_base_phys = cpu_phys_addr - pci_addr;
640 /* handle from 0 to top of I/O window */
641 hose->pci_io_size = pci_addr + size;
642
643 res = &hose->io_resource;
644 res->flags = IORESOURCE_IO;
645 res->start = pci_addr;
646 DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
647 res->start, res->start + size - 1);
648 break;
649 case 2: /* memory space */
650 memno = 0;
651 while (memno < 3 && hose->mem_resources[memno].flags)
652 ++memno;
653
654 if (memno == 0)
655 hose->pci_mem_offset = cpu_phys_addr - pci_addr;
656 if (memno < 3) {
657 res = &hose->mem_resources[memno];
658 res->flags = IORESOURCE_MEM;
659 res->start = cpu_phys_addr;
660 DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
661 res->start, res->start + size - 1);
662 }
663 break;
664 }
665 if (res != NULL) {
666 res->name = dev->full_name;
667 res->end = res->start + size - 1;
668 res->parent = NULL;
669 res->sibling = NULL;
670 res->child = NULL;
671 }
672 }
673 }
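/*
 * Illustrative entry for na = 2 (so np = 7 cells), values hypothetical:
 * { 0x02000000, 0x0, 0x80000000, <cpu-hi>, <cpu-lo>, 0x0, 0x80000000 }
 * is a 2GB memory window at PCI address 0x80000000. If the CPU cells
 * translate to 0x400000000, the first such window sets pci_mem_offset
 * to 0x400000000 - 0x80000000 = 0x380000000 and the resource is stored
 * CPU-side. I/O windows ((pci_space >> 24) & 0x3 == 1) instead keep the
 * bus port number in the resource and record io_base_phys.
 */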
674
675 #ifdef CONFIG_HOTPLUG
676
677 int pcibios_unmap_io_space(struct pci_bus *bus)
678 {
679 struct pci_controller *hose;
680
681 WARN_ON(bus == NULL);
682
683 /* If this is not a PHB, we only flush the hash table over
684 * the area mapped by this bridge. We don't play with the PTE
685 * mappings since we might have to deal with sub-page alignments
686 * so flushing the hash table is the only sane way to make sure
687 * that no hash entries are covering that removed bridge area
688 * while still allowing other busses overlapping those pages
689 */
690 if (bus->self) {
691 struct resource *res = bus->resource[0];
692
693 DBG("IO unmapping for PCI-PCI bridge %s\n",
694 pci_name(bus->self));
695
696 __flush_hash_table_range(&init_mm, res->start + _IO_BASE,
697 res->end - res->start + 1);
698 return 0;
699 }
700
701 /* Get the host bridge */
702 hose = pci_bus_to_host(bus);
703
704 /* Check if we have IOs allocated */
705 if (hose->io_base_alloc == 0)
706 return 0;
707
708 DBG("IO unmapping for PHB %s\n",
709 ((struct device_node *)hose->arch_data)->full_name);
710 DBG(" alloc=0x%p\n", hose->io_base_alloc);
711
712 /* This is a PHB, we fully unmap the IO area */
713 vunmap(hose->io_base_alloc);
714
715 return 0;
716 }
717 EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);
718
719 #endif /* CONFIG_HOTPLUG */
720
721 int __devinit pcibios_map_io_space(struct pci_bus *bus)
722 {
723 struct vm_struct *area;
724 unsigned long phys_page;
725 unsigned long size_page;
726 unsigned long io_virt_offset;
727 struct pci_controller *hose;
728
729 WARN_ON(bus == NULL);
730
731 /* If this is not a PHB, nothing to do; page tables still exist and
732 * thus HPTEs will be faulted in when needed
733 */
734 if (bus->self) {
735 DBG("IO mapping for PCI-PCI bridge %s\n",
736 pci_name(bus->self));
737 DBG(" virt=0x%016lx...0x%016lx\n",
738 bus->resource[0]->start + _IO_BASE,
739 bus->resource[0]->end + _IO_BASE);
740 return 0;
741 }
742
743 /* Get the host bridge */
744 hose = pci_bus_to_host(bus);
745 phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
746 size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);
747
748 /* Make sure IO area address is clear */
749 hose->io_base_alloc = NULL;
750
751 /* If there's no IO to map on that bus, get away too */
752 if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
753 return 0;
754
755 /* Let's allocate some IO space for that guy. We don't pass
756 * VM_IOREMAP because we don't care about alignment tricks that
757 * the core does in that case. Maybe we should, due to stupid cards
758 * with incomplete address decoding, but I'd rather not deal with
759 * those outside of the reserved 64K legacy region.
760 */
761 area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
762 if (area == NULL)
763 return -ENOMEM;
764 hose->io_base_alloc = area->addr;
765 hose->io_base_virt = (void __iomem *)(area->addr +
766 hose->io_base_phys - phys_page);
767
768 DBG("IO mapping for PHB %s\n",
769 ((struct device_node *)hose->arch_data)->full_name);
770 DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
771 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
772 DBG(" size=0x%016lx (alloc=0x%016lx)\n",
773 hose->pci_io_size, size_page);
774
775 /* Establish the mapping */
776 if (__ioremap_at(phys_page, area->addr, size_page,
777 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
778 return -ENOMEM;
779
780 /* Fixup hose IO resource */
781 io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
782 hose->io_resource.start += io_virt_offset;
783 hose->io_resource.end += io_virt_offset;
784
785 DBG(" hose->io_resource=0x%016lx...0x%016lx\n",
786 hose->io_resource.start, hose->io_resource.end);
787
788 return 0;
789 }
790 EXPORT_SYMBOL_GPL(pcibios_map_io_space);
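/*
 * Net effect, with hypothetical numbers: a PHB whose ranges put its
 * I/O window at io_base_phys = 0xf2000000 with pci_io_size = 0x10000
 * gets a 64K virtual window carved out of PHB_IO_BASE..PHB_IO_END,
 * io_base_virt points into it, and io_resource is rebased from bus
 * port numbers to offsets from _IO_BASE so inb()/outb() keep working.
 */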
791
792 static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
793 {
794 struct pci_controller *hose = pci_bus_to_host(dev->bus);
795 unsigned long offset;
796
797 if (res->flags & IORESOURCE_IO) {
798 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
799 res->start += offset;
800 res->end += offset;
801 } else if (res->flags & IORESOURCE_MEM) {
802 res->start += hose->pci_mem_offset;
803 res->end += hose->pci_mem_offset;
804 }
805 }
806
807 void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
808 struct pci_bus *bus)
809 {
810 /* Update device resources. */
811 int i;
812
813 DBG("%s: Fixup resources:\n", pci_name(dev));
814 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
815 struct resource *res = &dev->resource[i];
816 if (!res->flags)
817 continue;
818
819 DBG(" 0x%02x < %08lx:0x%016lx...0x%016lx\n",
820 i, res->flags, res->start, res->end);
821
822 fixup_resource(res, dev);
823
824 DBG(" > %08lx:0x%016lx...0x%016lx\n",
825 res->flags, res->start, res->end);
826 }
827 }
828 EXPORT_SYMBOL(pcibios_fixup_device_resources);
829
830 void __devinit pcibios_setup_new_device(struct pci_dev *dev)
831 {
832 struct dev_archdata *sd = &dev->dev.archdata;
833
834 sd->of_node = pci_device_to_OF_node(dev);
835
836 DBG("PCI device %s OF node: %s\n", pci_name(dev),
837 sd->of_node ? sd->of_node->full_name : "<none>");
838
839 sd->dma_ops = pci_dma_ops;
840 #ifdef CONFIG_NUMA
841 sd->numa_node = pcibus_to_node(dev->bus);
842 #else
843 sd->numa_node = -1;
844 #endif
845 if (ppc_md.pci_dma_dev_setup)
846 ppc_md.pci_dma_dev_setup(dev);
847 }
848 EXPORT_SYMBOL(pcibios_setup_new_device);
849
850 static void __devinit do_bus_setup(struct pci_bus *bus)
851 {
852 struct pci_dev *dev;
853
854 if (ppc_md.pci_dma_bus_setup)
855 ppc_md.pci_dma_bus_setup(bus);
856
857 list_for_each_entry(dev, &bus->devices, bus_list)
858 pcibios_setup_new_device(dev);
859
860 /* Read default IRQs and fixup if necessary */
861 list_for_each_entry(dev, &bus->devices, bus_list) {
862 pci_read_irq_line(dev);
863 if (ppc_md.pci_irq_fixup)
864 ppc_md.pci_irq_fixup(dev);
865 }
866 }
867
868 void __devinit pcibios_fixup_bus(struct pci_bus *bus)
869 {
870 struct pci_dev *dev = bus->self;
871 struct device_node *np;
872
873 np = pci_bus_to_OF_node(bus);
874
875 DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");
876
877 if (dev && pci_probe_only &&
878 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
879 /* This is a subordinate bridge */
880
881 pci_read_bridge_bases(bus);
882 pcibios_fixup_device_resources(dev, bus);
883 }
884
885 do_bus_setup(bus);
886
887 if (!pci_probe_only)
888 return;
889
890 list_for_each_entry(dev, &bus->devices, bus_list)
891 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
892 pcibios_fixup_device_resources(dev, bus);
893 }
894 EXPORT_SYMBOL(pcibios_fixup_bus);
895
896 unsigned long pci_address_to_pio(phys_addr_t address)
897 {
898 struct pci_controller *hose, *tmp;
899
900 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
901 if (address >= hose->io_base_phys &&
902 address < (hose->io_base_phys + hose->pci_io_size)) {
903 unsigned long base =
904 (unsigned long)hose->io_base_virt - _IO_BASE;
905 return base + (address - hose->io_base_phys);
906 }
907 }
908 return (unsigned int)-1;
909 }
910 EXPORT_SYMBOL_GPL(pci_address_to_pio);
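/*
 * Illustrative translation: if a hose has io_base_phys = 0xf2000000
 * and io_base_virt = _IO_BASE + 0x10000, then
 * pci_address_to_pio(0xf2000100) returns 0x10100, the port number a
 * driver would hand to inb()/outb().
 */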
911
912
913 #define IOBASE_BRIDGE_NUMBER 0
914 #define IOBASE_MEMORY 1
915 #define IOBASE_IO 2
916 #define IOBASE_ISA_IO 3
917 #define IOBASE_ISA_MEM 4
918
919 long sys_pciconfig_iobase(long which, unsigned long in_bus,
920 unsigned long in_devfn)
921 {
922 struct pci_controller* hose;
923 struct list_head *ln;
924 struct pci_bus *bus = NULL;
925 struct device_node *hose_node;
926
927 /* Argh! Please forgive me for that hack, but that's the
928 * simplest way to get existing XFree86 to not lock up on some
929 * G5 machines... So when something asks for bus 0 io base
930 * (bus 0 is HT root), we return the AGP one instead.
931 */
932 if (machine_is_compatible("MacRISC4"))
933 if (in_bus == 0)
934 in_bus = 0xf0;
935
936 /* That syscall isn't quite compatible with PCI domains, but it's
937 * used on pre-domain setups. We return the first match.
938 */
939
940 for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
941 bus = pci_bus_b(ln);
942 if (in_bus >= bus->number && in_bus <= bus->subordinate)
943 break;
944 bus = NULL;
945 }
946 if (bus == NULL || bus->sysdata == NULL)
947 return -ENODEV;
948
949 hose_node = (struct device_node *)bus->sysdata;
950 hose = PCI_DN(hose_node)->phb;
951
952 switch (which) {
953 case IOBASE_BRIDGE_NUMBER:
954 return (long)hose->first_busno;
955 case IOBASE_MEMORY:
956 return (long)hose->pci_mem_offset;
957 case IOBASE_IO:
958 return (long)hose->io_base_phys;
959 case IOBASE_ISA_IO:
960 return (long)isa_io_base;
961 case IOBASE_ISA_MEM:
962 return -EINVAL;
963 }
964
965 return -EOPNOTSUPP;
966 }
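/*
 * Userspace sketch (illustrative, not part of this file): the call is
 * normally reached as a raw syscall, e.g.
 *
 *	long io_phys = syscall(__NR_pciconfig_iobase, IOBASE_IO,
 *			       bus, devfn);
 *
 * which is how XFree86-era servers located a bus's I/O window; the
 * IOBASE_* selectors are the ones defined above.
 */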
967
968 #ifdef CONFIG_NUMA
969 int pcibus_to_node(struct pci_bus *bus)
970 {
971 struct pci_controller *phb = pci_bus_to_host(bus);
972 return phb->node;
973 }
974 EXPORT_SYMBOL(pcibus_to_node);
975 #endif