arch/powerpc/kernel/pci_64.c
/*
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 * Rework, based on alpha PCI code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;

static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);

/* pci_io_base -- the base address from which io bars are offsets.
 * This is the lowest I/O base address (so bar values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard coded offsets. If no ISA bus exists nothing
 * is mapped on the first 64K of IO space
 */
unsigned long pci_io_base = ISA_IO_BASE;
EXPORT_SYMBOL(pci_io_base);

LIST_HEAD(hose_list);

static struct dma_mapping_ops *pci_dma_ops;

void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_mapping_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

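/* Quirk: some pcnet32 adapters come up reporting the Trident vendor ID.
 * If a "Trident" device has an Ethernet class code, assume it is really
 * a pcnet32 and rewrite the vendor ID to AMD so the right driver binds.
 */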
static void fixup_broken_pcnet32(struct pci_dev* dev)
{
	if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
		dev->vendor = PCI_VENDOR_ID_AMD;
		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);

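/* Convert a resource (kernel view) to PCI bus addresses by subtracting the
 * host bridge's I/O or memory offset; the reverse helper below adds the
 * same offset back.
 */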
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			     struct resource *res)
{
	unsigned long offset = 0;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	if (!hose)
		return;

	if (res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	if (res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;

	region->start = res->start - offset;
	region->end = res->end - offset;
}

void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			     struct pci_bus_region *region)
{
	unsigned long offset = 0;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	if (!hose)
		return;

	if (res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	if (res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;

	res->start = region->start + offset;
	res->end = region->end + offset;
}

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
#endif

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff.
 */
void pcibios_align_resource(void *data, struct resource *res,
			    resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t start = res->start;
	unsigned long alignto;

	if (res->flags & IORESOURCE_IO) {
		unsigned long offset = (unsigned long)hose->io_base_virt -
					_IO_BASE;
		/* Make sure we start at our min on all hoses */
		if (start - offset < PCIBIOS_MIN_IO)
			start = PCIBIOS_MIN_IO + offset;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400
		 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;

	} else if (res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
			start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;

		/* Align to multiple of size of minimum base. */
		alignto = max(0x1000UL, align);
		start = ALIGN(start, alignto);
	}

	res->start = start;
}

void __devinit pcibios_claim_one_bus(struct pci_bus *b)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &b->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;
			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &b->children, node)
		pcibios_claim_one_bus(child_bus);
}
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
#endif

static void __init pcibios_claim_of_setup(void)
{
	struct pci_bus *b;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return;

	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_claim_one_bus(b);
}

static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
	const u32 *prop;
	int len;

	prop = of_get_property(np, name, &len);
	if (prop && len >= 4)
		return *prop;
	return def;
}

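/* Decode the phys.hi cell of an Open Firmware PCI address into
 * IORESOURCE_* and PCI_BASE_ADDRESS_* flags (memory vs. I/O space,
 * 64-bit, below-1M and prefetchable attributes).
 */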
static unsigned int pci_parse_of_flags(u32 addr0)
{
	unsigned int flags = 0;

	if (addr0 & 0x02000000) {
		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
		flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
		if (addr0 & 0x40000000)
			flags |= IORESOURCE_PREFETCH
				 | PCI_BASE_ADDRESS_MEM_PREFETCH;
	} else if (addr0 & 0x01000000)
		flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
	return flags;
}


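/* Walk the node's "assigned-addresses" property (5 cells per entry) and
 * fill in the matching dev->resource[] slots, translating each range
 * into the host's resource space via fixup_resource().
 */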
static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
{
	u64 base, size;
	unsigned int flags;
	struct resource *res;
	const u32 *addrs;
	u32 i;
	int proplen;

	addrs = of_get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
	for (; proplen >= 20; proplen -= 20, addrs += 5) {
		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;
		base = of_read_number(&addrs[1], 2);
		size = of_read_number(&addrs[3], 2);
		if (!size)
			continue;
		i = addrs[0] & 0xff;
		DBG(" base: %llx, size: %llx, i: %x\n",
		    (unsigned long long)base, (unsigned long long)size, i);

		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
		} else {
			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = base;
		res->end = base + size - 1;
		res->flags = flags;
		res->name = pci_name(dev);
		fixup_resource(res, dev);
	}
}

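/* Create and register a pci_dev for a device-tree node sitting on @bus.
 * Vendor/device IDs, class code and BARs are taken from firmware
 * properties rather than probed from config space.
 */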
struct pci_dev *of_create_pci_dev(struct device_node *node,
				  struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	const char *type;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;
	type = of_get_property(node, "device_type", NULL);
	if (type == NULL)
		type = "";

	DBG(" create device, devfn: %x, type: %s\n", devfn, type);

	dev->bus = bus;
	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->multifunction = 0;	/* maybe a lie? */

	dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
	dev->device = get_int_prop(node, "device-id", 0xffff);
	dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
	dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);

	dev->cfg_size = pci_cfg_space_size(dev);

	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
	dev->class = get_int_prop(node, "class-code", 0);
	dev->revision = get_int_prop(node, "revision-id", 0);

	DBG(" class: 0x%x\n", dev->class);
	DBG(" revision: 0x%x\n", dev->revision);

	dev->current_state = 4;	/* unknown power state */
	dev->error_state = pci_channel_io_normal;
	dev->dma_mask = 0xffffffff;

	if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (!strcmp(type, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;
		/* Maybe do a default OF mapping here */
		dev->irq = NO_IRQ;
	}

	pci_parse_of_addrs(node, dev);

	DBG(" adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);

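/* Scan the children of a PHB or bridge node, creating a pci_dev for each
 * child with a valid "reg" property and recursing into any bridges found,
 * then run the common bus setup.
 */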
void __devinit of_scan_bus(struct device_node *node,
			   struct pci_bus *bus)
{
	struct device_node *child = NULL;
	const u32 *reg;
	int reglen, devfn;
	struct pci_dev *dev;

	DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);

	while ((child = of_get_next_child(node, child)) != NULL) {
		DBG(" * %s\n", child->full_name);
		reg = of_get_property(child, "reg", &reglen);
		if (reg == NULL || reglen < 20)
			continue;
		devfn = (reg[0] >> 8) & 0xff;

		/* create a new pci_dev for this device */
		dev = of_create_pci_dev(child, bus, devfn);
		if (!dev)
			continue;
		DBG("dev header type: %x\n", dev->hdr_type);

		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
			of_scan_pci_bridge(child, dev);
	}

	do_bus_setup(bus);
}
EXPORT_SYMBOL(of_scan_bus);

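/* Set up the secondary bus behind a PCI-PCI bridge from its device-tree
 * node: the "bus-range" property gives the bus numbers and the "ranges"
 * property the bridge windows; the new bus is then scanned either from
 * the device tree or through config space depending on the probe mode.
 */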
void __devinit of_scan_pci_bridge(struct device_node *node,
				  struct pci_dev *dev)
{
	struct pci_bus *bus;
	const u32 *busrange, *ranges;
	int len, i, mode;
	struct resource *res;
	unsigned int flags;
	u64 size;

	DBG("of_scan_pci_bridge(%s)\n", node->full_name);

	/* parse bus-range property */
	busrange = of_get_property(node, "bus-range", &len);
	if (busrange == NULL || len != 8) {
		printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}
	ranges = of_get_property(node, "ranges", &len);
	if (ranges == NULL) {
		printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}

	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
	if (!bus) {
		printk(KERN_ERR "Failed to create pci bus for %s\n",
		       node->full_name);
		return;
	}

	bus->primary = dev->bus->number;
	bus->subordinate = busrange[1];
	bus->bridge_ctl = 0;
	bus->sysdata = node;

	/* parse ranges property */
	/* PCI #address-cells == 3 and #size-cells == 2 always */
	res = &dev->resource[PCI_BRIDGE_RESOURCES];
	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
		res->flags = 0;
		bus->resource[i] = res;
		++res;
	}
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		flags = pci_parse_of_flags(ranges[0]);
		size = of_read_number(&ranges[6], 2);
		if (flags == 0 || size == 0)
			continue;
		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
		}
		res->start = of_read_number(&ranges[1], 2);
		res->end = res->start + size - 1;
		res->flags = flags;
		fixup_resource(res, dev);
	}
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	DBG(" bus name: %s\n", bus->name);

	mode = PCI_PROBE_NORMAL;
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	DBG(" probe mode: %d\n", mode);

	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);
	else if (mode == PCI_PROBE_NORMAL)
		pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);

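/* Probe one PCI host bridge: create its root bus, map its I/O space,
 * request the I/O and memory windows, and scan the bus either from the
 * device tree or via config cycles depending on ppc_md.pci_probe_mode.
 */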
void __devinit scan_phb(struct pci_controller *hose)
{
	struct pci_bus *bus;
	struct device_node *node = hose->arch_data;
	int i, mode;
	struct resource *res;

	DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");

	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
	if (bus == NULL) {
		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		return;
	}
	bus->secondary = hose->first_busno;
	hose->bus = bus;

	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		pcibios_map_io_space(bus);

	bus->resource[0] = res = &hose->io_resource;
	if (res->flags && request_resource(&ioport_resource, res)) {
		printk(KERN_ERR "Failed to request PCI IO region "
		       "on PCI domain %04x\n", hose->global_number);
		DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
		    res->start, res->end);
	}

	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		bus->resource[i+1] = res;
		if (res->flags && request_resource(&iomem_resource, res))
			printk(KERN_ERR "Failed to request PCI memory region "
			       "on PCI domain %04x\n", hose->global_number);
	}

	mode = PCI_PROBE_NORMAL;

	if (node && ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	DBG(" probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE) {
		bus->subordinate = hose->last_busno;
		of_scan_bus(node, bus);
	}

	if (mode == PCI_PROBE_NORMAL)
		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}

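/* Main PCI initialization, run as a subsys initcall: scan every recorded
 * host bridge, then either claim the firmware-assigned resources
 * (pci_probe_only) or let the core assign the unassigned ones.
 */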
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;

	/* For now, override phys_mem_access_prot. If we need it,
	 * later, we may move that initialization to each ppc_md
	 */
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		iSeries_pcibios_init();

	printk(KERN_DEBUG "PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers. */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		scan_phb(hose);
		pci_bus_add_devices(hose->bus);
	}

	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
		if (pci_probe_only)
			pcibios_claim_of_setup();
		else
			/* FIXME: `else' will be removed when
			   pci_assign_unassigned_resources() is able to work
			   correctly with [partially] allocated PCI tree. */
			pci_assign_unassigned_resources();
	}

	/* Call machine dependent final fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall(pcibios_init);

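/* Turn on I/O and/or memory decoding in the device's command register for
 * the resource types selected by @mask.
 */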
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, oldcmd;
	int i;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	oldcmd = cmd;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];

		/* Only set up the requested stuff */
		if (!(mask & (1<<i)))
			continue;

		if (res->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	if (cmd != oldcmd) {
		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
		       pci_name(dev), cmd);
		/* Enable the appropriate bits in the PCI command register. */
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return 0;
	else {
		struct pci_controller *hose = pci_bus_to_host(bus);
		return hose->buid != 0;
	}
}

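/* Parse the host bridge's "ranges" property to fill in the hose's I/O and
 * memory resources and the corresponding CPU-to-PCI address offsets;
 * contiguous entries of the same type are merged into a single window.
 */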
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
					    struct device_node *dev, int prim)
{
	const unsigned int *ranges;
	unsigned int pci_space;
	unsigned long size;
	int rlen = 0;
	int memno = 0;
	struct resource *res;
	int np, na = of_n_addr_cells(dev);
	unsigned long pci_addr, cpu_phys_addr;

	np = na + 5;

	/* From "PCI Binding to 1275"
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 2:       a PCI address
	 *   cells 3 or 3+4:    a CPU physical address
	 *                      (size depending on dev->n_addr_cells)
	 *   cells 4+5 or 5+6:  the size of the range
	 */
	ranges = of_get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;
	hose->io_base_phys = 0;
	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
		res = NULL;
		pci_space = ranges[0];
		pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
		cpu_phys_addr = of_translate_address(dev, &ranges[3]);
		size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
		ranges += np;
		if (size == 0)
			continue;

		/* Now consume following elements while they are contiguous */
		while (rlen >= np * sizeof(unsigned int)) {
			unsigned long addr, phys;

			if (ranges[0] != pci_space)
				break;
			addr = ((unsigned long)ranges[1] << 32) | ranges[2];
			phys = ranges[3];
			if (na >= 2)
				phys = (phys << 32) | ranges[4];
			if (addr != pci_addr + size ||
			    phys != cpu_phys_addr + size)
				break;

			size += ((unsigned long)ranges[na+3] << 32)
				| ranges[na+4];
			ranges += np;
			rlen -= np * sizeof(unsigned int);
		}

		switch ((pci_space >> 24) & 0x3) {
		case 1: /* I/O space */
			hose->io_base_phys = cpu_phys_addr - pci_addr;
			/* handle from 0 to top of I/O window */
			hose->pci_io_size = pci_addr + size;

			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
			    res->start, res->start + size - 1);
			break;
		case 2: /* memory space */
			memno = 0;
			while (memno < 3 && hose->mem_resources[memno].flags)
				++memno;

			if (memno == 0)
				hose->pci_mem_offset = cpu_phys_addr - pci_addr;
			if (memno < 3) {
				res = &hose->mem_resources[memno];
				res->flags = IORESOURCE_MEM;
				res->start = cpu_phys_addr;
				DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
				    res->start, res->start + size - 1);
			}
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}
}

#ifdef CONFIG_HOTPLUG

int pcibios_unmap_io_space(struct pci_bus *bus)
{
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this is not a PHB, we only flush the hash table over
	 * the area mapped by this bridge. We don't play with the PTE
	 * mappings since we might have to deal with sub-page alignments
	 * so flushing the hash table is the only sane way to make sure
	 * that no hash entries are covering that removed bridge area
	 * while still allowing other busses overlapping those pages.
	 */
	if (bus->self) {
		struct resource *res = bus->resource[0];

		DBG("IO unmapping for PCI-PCI bridge %s\n",
		    pci_name(bus->self));

		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
					 res->end - res->start + 1);
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);

	/* Check if we have IOs allocated */
	if (hose->io_base_alloc == 0)
		return 0;

	DBG("IO unmapping for PHB %s\n",
	    ((struct device_node *)hose->arch_data)->full_name);
	DBG(" alloc=0x%p\n", hose->io_base_alloc);

	/* This is a PHB, we fully unmap the IO area */
	vunmap(hose->io_base_alloc);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);

#endif /* CONFIG_HOTPLUG */

int __devinit pcibios_map_io_space(struct pci_bus *bus)
{
	struct vm_struct *area;
	unsigned long phys_page;
	unsigned long size_page;
	unsigned long io_virt_offset;
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this is not a PHB, there is nothing to do: page tables still
	 * exist and thus HPTEs will be faulted in when needed
	 */
	if (bus->self) {
		DBG("IO mapping for PCI-PCI bridge %s\n",
		    pci_name(bus->self));
		DBG(" virt=0x%016lx...0x%016lx\n",
		    bus->resource[0]->start + _IO_BASE,
		    bus->resource[0]->end + _IO_BASE);
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);
	phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
	size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);

	/* Make sure IO area address is clear */
	hose->io_base_alloc = NULL;

	/* If there's no IO to map on that bus, get away too */
	if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
		return 0;

	/* Let's allocate some IO space for that guy. We don't pass
	 * VM_IOREMAP because we don't care about alignment tricks that
	 * the core does in that case. Maybe we should, due to stupid cards
	 * with incomplete address decoding, but I'd rather not deal with
	 * those outside of the reserved 64K legacy region.
	 */
	area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
	if (area == NULL)
		return -ENOMEM;
	hose->io_base_alloc = area->addr;
	hose->io_base_virt = (void __iomem *)(area->addr +
					      hose->io_base_phys - phys_page);

	DBG("IO mapping for PHB %s\n",
	    ((struct device_node *)hose->arch_data)->full_name);
	DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
	    hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
	DBG(" size=0x%016lx (alloc=0x%016lx)\n",
	    hose->pci_io_size, size_page);

	/* Establish the mapping */
	if (__ioremap_at(phys_page, area->addr, size_page,
			 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
		return -ENOMEM;

	/* Fixup hose IO resource */
	io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	hose->io_resource.start += io_virt_offset;
	hose->io_resource.end += io_virt_offset;

	DBG(" hose->io_resource=0x%016lx...0x%016lx\n",
	    hose->io_resource.start, hose->io_resource.end);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);

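/* Translate a resource parsed from the device tree (PCI bus view) into
 * the kernel's view by applying the hose's I/O or memory offset.
 */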
static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long offset;

	if (res->flags & IORESOURCE_IO) {
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		res->start += offset;
		res->end += offset;
	} else if (res->flags & IORESOURCE_MEM) {
		res->start += hose->pci_mem_offset;
		res->end += hose->pci_mem_offset;
	}
}

void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
					      struct pci_bus *bus)
{
	/* Update device resources. */
	int i;

	DBG("%s: Fixup resources:\n", pci_name(dev));
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];
		if (!res->flags)
			continue;

		DBG(" 0x%02x < %08lx:0x%016lx...0x%016lx\n",
		    i, res->flags, res->start, res->end);

		fixup_resource(res, dev);

		DBG(" > %08lx:0x%016lx...0x%016lx\n",
		    res->flags, res->start, res->end);
	}
}
EXPORT_SYMBOL(pcibios_fixup_device_resources);

void __devinit pcibios_setup_new_device(struct pci_dev *dev)
{
	struct dev_archdata *sd = &dev->dev.archdata;

	sd->of_node = pci_device_to_OF_node(dev);

	DBG("PCI device %s OF node: %s\n", pci_name(dev),
	    sd->of_node ? sd->of_node->full_name : "<none>");

	sd->dma_ops = pci_dma_ops;
#ifdef CONFIG_NUMA
	sd->numa_node = pcibus_to_node(dev->bus);
#else
	sd->numa_node = -1;
#endif
	if (ppc_md.pci_dma_dev_setup)
		ppc_md.pci_dma_dev_setup(dev);
}
EXPORT_SYMBOL(pcibios_setup_new_device);

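/* Per-bus setup: give the platform a chance to set up DMA for the bus,
 * attach DMA ops and the OF node to each device, and read/fix up the
 * interrupt lines.
 */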
static void __devinit do_bus_setup(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (ppc_md.pci_dma_bus_setup)
		ppc_md.pci_dma_bus_setup(bus);

	list_for_each_entry(dev, &bus->devices, bus_list)
		pcibios_setup_new_device(dev);

	/* Read default IRQs and fixup if necessary */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_read_irq_line(dev);
		if (ppc_md.pci_irq_fixup)
			ppc_md.pci_irq_fixup(dev);
	}
}

void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;
	struct device_node *np;

	np = pci_bus_to_OF_node(bus);

	DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");

	if (dev && pci_probe_only &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		/* This is a subordinate bridge */

		pci_read_bridge_bases(bus);
		pcibios_fixup_device_resources(dev, bus);
	}

	do_bus_setup(bus);

	if (!pci_probe_only)
		return;

	list_for_each_entry(dev, &bus->devices, bus_list)
		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			pcibios_fixup_device_resources(dev, bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);

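/* Convert a CPU physical address inside a host bridge's I/O window into
 * the corresponding port number usable with inb()/outb(); returns -1 if
 * no bridge claims the address.
 */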
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + hose->pci_io_size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			return base + (address - hose->io_base_phys);
		}
	}
	return (unsigned int)-1;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);


#define IOBASE_BRIDGE_NUMBER 0
#define IOBASE_MEMORY 1
#define IOBASE_IO 2
#define IOBASE_ISA_IO 3
#define IOBASE_ISA_MEM 4

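/* Legacy syscall used by userspace (e.g. X servers) to look up the I/O,
 * memory, or ISA base addresses associated with a PCI bus.
 */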
long sys_pciconfig_iobase(long which, unsigned long in_bus,
			  unsigned long in_devfn)
{
	struct pci_controller* hose;
	struct list_head *ln;
	struct pci_bus *bus = NULL;
	struct device_node *hose_node;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lockup on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
	if (machine_is_compatible("MacRISC4"))
		if (in_bus == 0)
			in_bus = 0xf0;

	/* That syscall isn't quite compatible with PCI domains, but it's
	 * used by pre-domain setups. We return the first match.
	 */

	for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
		bus = pci_bus_b(ln);
		if (in_bus >= bus->number && in_bus <= bus->subordinate)
			break;
		bus = NULL;
	}
	if (bus == NULL || bus->sysdata == NULL)
		return -ENODEV;

	hose_node = (struct device_node *)bus->sysdata;
	hose = PCI_DN(hose_node)->phb;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return -EINVAL;
	}

	return -EOPNOTSUPP;
}

#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
	struct pci_controller *phb = pci_bus_to_host(bus);
	return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif