sparc64: Stop creating dummy root PCI host controller devices.
arch/sparc64/kernel/pci.c
1 /* pci.c: UltraSparc PCI controller support.
2 *
3 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
4 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
5 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
6 *
7 * OF tree based PCI bus probing taken from the PowerPC port
8 * with minor modifications, see there for credits.
9 */
10
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/string.h>
14 #include <linux/sched.h>
15 #include <linux/capability.h>
16 #include <linux/errno.h>
17 #include <linux/pci.h>
18 #include <linux/msi.h>
19 #include <linux/irq.h>
20 #include <linux/init.h>
21
22 #include <asm/uaccess.h>
23 #include <asm/pgtable.h>
24 #include <asm/irq.h>
25 #include <asm/ebus.h>
26 #include <asm/prom.h>
27 #include <asm/apb.h>
28
29 #include "pci_impl.h"
30
31 #ifndef CONFIG_PCI
32 /* A "nop" PCI implementation. */
33 asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn,
34 unsigned long off, unsigned long len,
35 unsigned char *buf)
36 {
37 return 0;
38 }
39 asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long dfn,
40 unsigned long off, unsigned long len,
41 unsigned char *buf)
42 {
43 return 0;
44 }
45 #else
46
47 /* List of all PCI controllers found in the system. */
48 struct pci_pbm_info *pci_pbm_root = NULL;
49
50 /* Each PBM found gets a unique index. */
51 int pci_num_pbms = 0;
52
53 volatile int pci_poke_in_progress;
54 volatile int pci_poke_cpu = -1;
55 volatile int pci_poke_faulted;
56
57 static DEFINE_SPINLOCK(pci_poke_lock);
58
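/* The accessors below poke PCI config space through the physical
 * address ASI.  The pci_poke_* flags let the arch trap handlers
 * recognize a fault taken here (e.g. a master abort while probing a
 * non-existent device): instead of treating it as fatal, they set
 * pci_poke_faulted and let the access be skipped, which is why a
 * faulted read leaves *ret untouched.
 */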
59 void pci_config_read8(u8 *addr, u8 *ret)
60 {
61 unsigned long flags;
62 u8 byte;
63
64 spin_lock_irqsave(&pci_poke_lock, flags);
65 pci_poke_cpu = smp_processor_id();
66 pci_poke_in_progress = 1;
67 pci_poke_faulted = 0;
68 __asm__ __volatile__("membar #Sync\n\t"
69 "lduba [%1] %2, %0\n\t"
70 "membar #Sync"
71 : "=r" (byte)
72 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
73 : "memory");
74 pci_poke_in_progress = 0;
75 pci_poke_cpu = -1;
76 if (!pci_poke_faulted)
77 *ret = byte;
78 spin_unlock_irqrestore(&pci_poke_lock, flags);
79 }
80
81 void pci_config_read16(u16 *addr, u16 *ret)
82 {
83 unsigned long flags;
84 u16 word;
85
86 spin_lock_irqsave(&pci_poke_lock, flags);
87 pci_poke_cpu = smp_processor_id();
88 pci_poke_in_progress = 1;
89 pci_poke_faulted = 0;
90 __asm__ __volatile__("membar #Sync\n\t"
91 "lduha [%1] %2, %0\n\t"
92 "membar #Sync"
93 : "=r" (word)
94 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
95 : "memory");
96 pci_poke_in_progress = 0;
97 pci_poke_cpu = -1;
98 if (!pci_poke_faulted)
99 *ret = word;
100 spin_unlock_irqrestore(&pci_poke_lock, flags);
101 }
102
103 void pci_config_read32(u32 *addr, u32 *ret)
104 {
105 unsigned long flags;
106 u32 dword;
107
108 spin_lock_irqsave(&pci_poke_lock, flags);
109 pci_poke_cpu = smp_processor_id();
110 pci_poke_in_progress = 1;
111 pci_poke_faulted = 0;
112 __asm__ __volatile__("membar #Sync\n\t"
113 "lduwa [%1] %2, %0\n\t"
114 "membar #Sync"
115 : "=r" (dword)
116 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
117 : "memory");
118 pci_poke_in_progress = 0;
119 pci_poke_cpu = -1;
120 if (!pci_poke_faulted)
121 *ret = dword;
122 spin_unlock_irqrestore(&pci_poke_lock, flags);
123 }
124
125 void pci_config_write8(u8 *addr, u8 val)
126 {
127 unsigned long flags;
128
129 spin_lock_irqsave(&pci_poke_lock, flags);
130 pci_poke_cpu = smp_processor_id();
131 pci_poke_in_progress = 1;
132 pci_poke_faulted = 0;
133 __asm__ __volatile__("membar #Sync\n\t"
134 "stba %0, [%1] %2\n\t"
135 "membar #Sync"
136 : /* no outputs */
137 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
138 : "memory");
139 pci_poke_in_progress = 0;
140 pci_poke_cpu = -1;
141 spin_unlock_irqrestore(&pci_poke_lock, flags);
142 }
143
144 void pci_config_write16(u16 *addr, u16 val)
145 {
146 unsigned long flags;
147
148 spin_lock_irqsave(&pci_poke_lock, flags);
149 pci_poke_cpu = smp_processor_id();
150 pci_poke_in_progress = 1;
151 pci_poke_faulted = 0;
152 __asm__ __volatile__("membar #Sync\n\t"
153 "stha %0, [%1] %2\n\t"
154 "membar #Sync"
155 : /* no outputs */
156 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
157 : "memory");
158 pci_poke_in_progress = 0;
159 pci_poke_cpu = -1;
160 spin_unlock_irqrestore(&pci_poke_lock, flags);
161 }
162
163 void pci_config_write32(u32 *addr, u32 val)
164 {
165 unsigned long flags;
166
167 spin_lock_irqsave(&pci_poke_lock, flags);
168 pci_poke_cpu = smp_processor_id();
169 pci_poke_in_progress = 1;
170 pci_poke_faulted = 0;
171 __asm__ __volatile__("membar #Sync\n\t"
172 "stwa %0, [%1] %2\n\t"
173 "membar #Sync"
174 : /* no outputs */
175 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
176 : "memory");
177 pci_poke_in_progress = 0;
178 pci_poke_cpu = -1;
179 spin_unlock_irqrestore(&pci_poke_lock, flags);
180 }
181
182 /* Probe for all PCI controllers in the system. */
183 extern void sabre_init(struct device_node *, const char *);
184 extern void psycho_init(struct device_node *, const char *);
185 extern void schizo_init(struct device_node *, const char *);
186 extern void schizo_plus_init(struct device_node *, const char *);
187 extern void tomatillo_init(struct device_node *, const char *);
188 extern void sun4v_pci_init(struct device_node *, const char *);
189 extern void fire_pci_init(struct device_node *, const char *);
190
191 static struct {
192 char *model_name;
193 void (*init)(struct device_node *, const char *);
194 } pci_controller_table[] __initdata = {
195 { "SUNW,sabre", sabre_init },
196 { "pci108e,a000", sabre_init },
197 { "pci108e,a001", sabre_init },
198 { "SUNW,psycho", psycho_init },
199 { "pci108e,8000", psycho_init },
200 { "SUNW,schizo", schizo_init },
201 { "pci108e,8001", schizo_init },
202 { "SUNW,schizo+", schizo_plus_init },
203 { "pci108e,8002", schizo_plus_init },
204 { "SUNW,tomatillo", tomatillo_init },
205 { "pci108e,a801", tomatillo_init },
206 { "SUNW,sun4v-pci", sun4v_pci_init },
207 { "pciex108e,80f0", fire_pci_init },
208 };
209 #define PCI_NUM_CONTROLLER_TYPES ARRAY_SIZE(pci_controller_table)
210
211 static int __init pci_controller_init(const char *model_name, int namelen, struct device_node *dp)
212 {
213 int i;
214
215 for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
216 if (!strncmp(model_name,
217 pci_controller_table[i].model_name,
218 namelen)) {
219 pci_controller_table[i].init(dp, model_name);
220 return 1;
221 }
222 }
223
224 return 0;
225 }
226
227 static int __init pci_controller_scan(int (*handler)(const char *, int, struct device_node *))
228 {
229 struct device_node *dp;
230 int count = 0;
231
232 for_each_node_by_name(dp, "pci") {
233 struct property *prop;
234 int len;
235
236 prop = of_find_property(dp, "model", &len);
237 if (!prop)
238 prop = of_find_property(dp, "compatible", &len);
239
240 if (prop) {
241 const char *model = prop->value;
242 int item_len = 0;
243
244 /* Our value may be a multi-valued string in the
245 * case of some compatible properties. For sanity,
246 * only try the first one.
247 */
248 while (model[item_len] && len) {
249 len--;
250 item_len++;
251 }
252
253 if (handler(model, item_len, dp))
254 count++;
255 }
256 }
257
258 return count;
259 }
260
261 /* Find each controller in the system, attach and initialize a
262  * software state structure for each, and link it into the
263  * pci_pbm_root list.  Set up each controller enough that bus
264  * scanning can be done.
265 */
266 static void __init pci_controller_probe(void)
267 {
268 printk("PCI: Probing for controllers.\n");
269
270 pci_controller_scan(pci_controller_init);
271 }
272
273 static int ofpci_verbose;
274
275 static int __init ofpci_debug(char *str)
276 {
277 int val = 0;
278
279 get_option(&str, &val);
280 if (val)
281 ofpci_verbose = 1;
282 return 1;
283 }
284
285 __setup("ofpci_debug=", ofpci_debug);
286
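/* Decode the phys.hi cell of an OF PCI address (npt000ss bbbbbbbb
 * ddddd fff rrrrrrrr per the PCI bus binding): the ss bits select
 * I/O vs. 32-bit vs. 64-bit memory space and the p bit marks the
 * range prefetchable.  Translate that into IORESOURCE_* /
 * PCI_BASE_ADDRESS_* flag bits; a return value of 0 means the entry
 * refers to config space and carries no resource.
 */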
287 static unsigned long pci_parse_of_flags(u32 addr0)
288 {
289 unsigned long flags = 0;
290
291 if (addr0 & 0x02000000) {
292 flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
293 flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
294 flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
295 if (addr0 & 0x40000000)
296 flags |= IORESOURCE_PREFETCH
297 | PCI_BASE_ADDRESS_MEM_PREFETCH;
298 } else if (addr0 & 0x01000000)
299 flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
300 return flags;
301 }
302
303 /* The of_device layer has translated all of the assigned-address properties
304  * into physical address resources; we only have to figure out the register
305  * mapping.
306 */
307 static void pci_parse_of_addrs(struct of_device *op,
308 struct device_node *node,
309 struct pci_dev *dev)
310 {
311 struct resource *op_res;
312 const u32 *addrs;
313 int proplen;
314
315 addrs = of_get_property(node, "assigned-addresses", &proplen);
316 if (!addrs)
317 return;
318 if (ofpci_verbose)
319 printk(" parse addresses (%d bytes) @ %p\n",
320 proplen, addrs);
321 op_res = &op->resource[0];
322 for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
323 struct resource *res;
324 unsigned long flags;
325 int i;
326
327 flags = pci_parse_of_flags(addrs[0]);
328 if (!flags)
329 continue;
330 i = addrs[0] & 0xff;
331 if (ofpci_verbose)
332 printk(" start: %lx, end: %lx, i: %x\n",
333 op_res->start, op_res->end, i);
334
335 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
336 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
337 } else if (i == dev->rom_base_reg) {
338 res = &dev->resource[PCI_ROM_RESOURCE];
339 flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
340 } else {
341 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
342 continue;
343 }
344 res->start = op_res->start;
345 res->end = op_res->end;
346 res->flags = flags;
347 res->name = pci_name(dev);
348 }
349 }
350
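/* Create a pci_dev for NODE using the properties the firmware left
 * behind rather than probing config space: the IDs, resources and
 * interrupt all come from the OF device tree, and only the
 * class/revision register is read back from the device itself.
 */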
351 struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
352 struct device_node *node,
353 struct pci_bus *bus, int devfn)
354 {
355 struct dev_archdata *sd;
356 struct pci_dev *dev;
357 const char *type;
358 u32 class;
359
360 dev = alloc_pci_dev();
361 if (!dev)
362 return NULL;
363
364 sd = &dev->dev.archdata;
365 sd->iommu = pbm->iommu;
366 sd->stc = &pbm->stc;
367 sd->host_controller = pbm;
368 sd->prom_node = node;
369 sd->op = of_find_device_by_node(node);
370 sd->numa_node = pbm->numa_node;
371
372 sd = &sd->op->dev.archdata;
373 sd->iommu = pbm->iommu;
374 sd->stc = &pbm->stc;
375 sd->numa_node = pbm->numa_node;
376
377 type = of_get_property(node, "device_type", NULL);
378 if (type == NULL)
379 type = "";
380
381 if (ofpci_verbose)
382 printk(" create device, devfn: %x, type: %s\n",
383 devfn, type);
384
385 dev->bus = bus;
386 dev->sysdata = node;
387 dev->dev.parent = bus->bridge;
388 dev->dev.bus = &pci_bus_type;
389 dev->devfn = devfn;
390 dev->multifunction = 0; /* maybe a lie? */
391
392 dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
393 dev->device = of_getintprop_default(node, "device-id", 0xffff);
394 dev->subsystem_vendor =
395 of_getintprop_default(node, "subsystem-vendor-id", 0);
396 dev->subsystem_device =
397 of_getintprop_default(node, "subsystem-id", 0);
398
399 dev->cfg_size = pci_cfg_space_size(dev);
400
401 /* We can't actually use the firmware value, we have
402 * to read what is in the register right now. One
403 * reason is that in the case of IDE interfaces the
404 	 * firmware can sample the value before the IDE
405 * interface is programmed into native mode.
406 */
407 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
408 dev->class = class >> 8;
409 dev->revision = class & 0xff;
410
411 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
412 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
413
414 if (ofpci_verbose)
415 printk(" class: 0x%x device name: %s\n",
416 dev->class, pci_name(dev));
417
418 /* I have seen IDE devices which will not respond to
419 * the bmdma simplex check reads if bus mastering is
420 * disabled.
421 */
422 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
423 pci_set_master(dev);
424
425 dev->current_state = 4; /* unknown power state */
426 dev->error_state = pci_channel_io_normal;
427
428 if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
429 /* a PCI-PCI bridge */
430 dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
431 dev->rom_base_reg = PCI_ROM_ADDRESS1;
432 } else if (!strcmp(type, "cardbus")) {
433 dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
434 } else {
435 dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
436 dev->rom_base_reg = PCI_ROM_ADDRESS;
437
438 dev->irq = sd->op->irqs[0];
439 if (dev->irq == 0xffffffff)
440 dev->irq = PCI_IRQ_NONE;
441 }
442
443 pci_parse_of_addrs(sd->op, node, dev);
444
445 if (ofpci_verbose)
446 printk(" adding to system ...\n");
447
448 pci_device_add(dev, bus);
449
450 return dev;
451 }
452
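/* Each set bit in an APB (Simba) address map register enables one
 * aperture slot.  Return the indices of the lowest and highest bits
 * set so the caller can fake up a single contiguous bus resource.
 */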
453 static void __devinit apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
454 {
455 u32 idx, first, last;
456
457 first = 8;
458 last = 0;
459 for (idx = 0; idx < 8; idx++) {
460 if ((map & (1 << idx)) != 0) {
461 if (first > idx)
462 first = idx;
463 if (last < idx)
464 last = idx;
465 }
466 }
467
468 *first_p = first;
469 *last_p = last;
470 }
471
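/* Resource values parsed from config space or OF ranges are offsets
 * within the PBM's I/O or memory space; rebase them onto the root
 * resource to obtain CPU physical addresses.
 */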
472 static void pci_resource_adjust(struct resource *res,
473 struct resource *root)
474 {
475 res->start += root->start;
476 res->end += root->start;
477 }
478
479 /* For PCI bus devices which lack a 'ranges' property we interrogate
480 * the config space values to set the resources, just like the generic
481 * Linux PCI probing code does.
482 */
483 static void __devinit pci_cfg_fake_ranges(struct pci_dev *dev,
484 struct pci_bus *bus,
485 struct pci_pbm_info *pbm)
486 {
487 struct resource *res;
488 u8 io_base_lo, io_limit_lo;
489 u16 mem_base_lo, mem_limit_lo;
490 unsigned long base, limit;
491
492 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
493 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
494 base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
495 limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;
496
497 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
498 u16 io_base_hi, io_limit_hi;
499
500 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
501 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
502 base |= (io_base_hi << 16);
503 limit |= (io_limit_hi << 16);
504 }
505
506 res = bus->resource[0];
507 if (base <= limit) {
508 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
509 if (!res->start)
510 res->start = base;
511 if (!res->end)
512 res->end = limit + 0xfff;
513 pci_resource_adjust(res, &pbm->io_space);
514 }
515
516 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
517 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
518 base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
519 limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
520
521 res = bus->resource[1];
522 if (base <= limit) {
523 res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) |
524 IORESOURCE_MEM);
525 res->start = base;
526 res->end = limit + 0xfffff;
527 pci_resource_adjust(res, &pbm->mem_space);
528 }
529
530 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
531 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
532 base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
533 limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
534
535 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
536 u32 mem_base_hi, mem_limit_hi;
537
538 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
539 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
540
541 /*
542 * Some bridges set the base > limit by default, and some
543 * (broken) BIOSes do not initialize them. If we find
544 * this, just assume they are not being used.
545 */
546 if (mem_base_hi <= mem_limit_hi) {
547 base |= ((long) mem_base_hi) << 32;
548 limit |= ((long) mem_limit_hi) << 32;
549 }
550 }
551
552 res = bus->resource[2];
553 if (base <= limit) {
554 res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) |
555 IORESOURCE_MEM | IORESOURCE_PREFETCH);
556 res->start = base;
557 res->end = limit + 0xfffff;
558 pci_resource_adjust(res, &pbm->mem_space);
559 }
560 }
561
562 /* Cook up fake bus resources for SUNW,simba PCI bridges which lack
563 * a proper 'ranges' property.
564 */
565 static void __devinit apb_fake_ranges(struct pci_dev *dev,
566 struct pci_bus *bus,
567 struct pci_pbm_info *pbm)
568 {
569 struct resource *res;
570 u32 first, last;
571 u8 map;
572
573 pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
574 apb_calc_first_last(map, &first, &last);
575 res = bus->resource[0];
576 res->start = (first << 21);
577 res->end = (last << 21) + ((1 << 21) - 1);
578 res->flags = IORESOURCE_IO;
579 pci_resource_adjust(res, &pbm->io_space);
580
581 pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
582 apb_calc_first_last(map, &first, &last);
583 res = bus->resource[1];
584 res->start = (first << 21);
585 res->end = (last << 21) + ((1 << 21) - 1);
586 res->flags = IORESOURCE_MEM;
587 pci_resource_adjust(res, &pbm->mem_space);
588 }
589
590 static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
591 struct device_node *node,
592 struct pci_bus *bus);
593
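/* OF properties store 64-bit quantities as two consecutive 32-bit
 * cells, most significant cell first.
 */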
594 #define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
595
596 static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
597 struct device_node *node,
598 struct pci_dev *dev)
599 {
600 struct pci_bus *bus;
601 const u32 *busrange, *ranges;
602 int len, i, simba;
603 struct resource *res;
604 unsigned int flags;
605 u64 size;
606
607 if (ofpci_verbose)
608 printk("of_scan_pci_bridge(%s)\n", node->full_name);
609
610 /* parse bus-range property */
611 busrange = of_get_property(node, "bus-range", &len);
612 if (busrange == NULL || len != 8) {
613 printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
614 node->full_name);
615 return;
616 }
617 ranges = of_get_property(node, "ranges", &len);
618 simba = 0;
619 if (ranges == NULL) {
620 const char *model = of_get_property(node, "model", NULL);
621 if (model && !strcmp(model, "SUNW,simba"))
622 simba = 1;
623 }
624
625 bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
626 if (!bus) {
627 printk(KERN_ERR "Failed to create pci bus for %s\n",
628 node->full_name);
629 return;
630 }
631
632 bus->primary = dev->bus->number;
633 bus->subordinate = busrange[1];
634 bus->bridge_ctl = 0;
635
636 /* parse ranges property, or cook one up by hand for Simba */
637 /* PCI #address-cells == 3 and #size-cells == 2 always */
638 res = &dev->resource[PCI_BRIDGE_RESOURCES];
639 for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
640 res->flags = 0;
641 bus->resource[i] = res;
642 ++res;
643 }
644 if (simba) {
645 apb_fake_ranges(dev, bus, pbm);
646 goto after_ranges;
647 } else if (ranges == NULL) {
648 pci_cfg_fake_ranges(dev, bus, pbm);
649 goto after_ranges;
650 }
651 i = 1;
652 for (; len >= 32; len -= 32, ranges += 8) {
653 struct resource *root;
654
655 flags = pci_parse_of_flags(ranges[0]);
656 size = GET_64BIT(ranges, 6);
657 if (flags == 0 || size == 0)
658 continue;
659 if (flags & IORESOURCE_IO) {
660 res = bus->resource[0];
661 if (res->flags) {
662 printk(KERN_ERR "PCI: ignoring extra I/O range"
663 " for bridge %s\n", node->full_name);
664 continue;
665 }
666 root = &pbm->io_space;
667 } else {
668 if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
669 printk(KERN_ERR "PCI: too many memory ranges"
670 " for bridge %s\n", node->full_name);
671 continue;
672 }
673 res = bus->resource[i];
674 ++i;
675 root = &pbm->mem_space;
676 }
677
678 res->start = GET_64BIT(ranges, 1);
679 res->end = res->start + size - 1;
680 res->flags = flags;
681
682 /* Another way to implement this would be to add an of_device
683 * layer routine that can calculate a resource for a given
684 * range property value in a PCI device.
685 */
686 pci_resource_adjust(res, root);
687 }
688 after_ranges:
689 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
690 bus->number);
691 if (ofpci_verbose)
692 printk(" bus name: %s\n", bus->name);
693
694 pci_of_scan_bus(pbm, node, bus);
695 }
696
697 static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
698 struct device_node *node,
699 struct pci_bus *bus)
700 {
701 struct device_node *child;
702 const u32 *reg;
703 int reglen, devfn, prev_devfn;
704 struct pci_dev *dev;
705
706 if (ofpci_verbose)
707 printk("PCI: scan_bus[%s] bus no %d\n",
708 node->full_name, bus->number);
709
710 child = NULL;
711 prev_devfn = -1;
712 while ((child = of_get_next_child(node, child)) != NULL) {
713 if (ofpci_verbose)
714 printk(" * %s\n", child->full_name);
715 reg = of_get_property(child, "reg", &reglen);
716 if (reg == NULL || reglen < 20)
717 continue;
718
719 devfn = (reg[0] >> 8) & 0xff;
720
721 /* This is a workaround for some device trees
722 * which list PCI devices twice. On the V100
723 		 * for example, device number 3 is listed twice:
724 		 * once as "pm" and again as "lomp".
725 */
726 if (devfn == prev_devfn)
727 continue;
728 prev_devfn = devfn;
729
730 /* create a new pci_dev for this device */
731 dev = of_create_pci_dev(pbm, child, bus, devfn);
732 if (!dev)
733 continue;
734 if (ofpci_verbose)
735 printk("PCI: dev header type: %x\n",
736 dev->hdr_type);
737
738 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
739 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
740 of_scan_pci_bridge(pbm, child, dev);
741 }
742 }
743
744 static ssize_t
745 show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
746 {
747 struct pci_dev *pdev;
748 struct device_node *dp;
749
750 pdev = to_pci_dev(dev);
751 dp = pdev->dev.archdata.prom_node;
752
753 return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
754 }
755
756 static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
757
758 static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
759 {
760 struct pci_dev *dev;
761 struct pci_bus *child_bus;
762 int err;
763
764 list_for_each_entry(dev, &bus->devices, bus_list) {
765 		/* We don't really care whether creating this file
766 		 * succeeds, but sysfs_create_file() is declared
767 		 * __must_check, so the return value has to be assigned
768 		 * to something to keep the compiler quiet.
771 */
772 err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
773 }
774 list_for_each_entry(child_bus, &bus->children, node)
775 pci_bus_register_of_sysfs(child_bus);
776 }
777
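/* Called by the controller drivers' scan_bus() methods for every PBM
 * found during controller probing: create the root pci_bus using the
 * PBM's config accessors, hang the PBM's I/O and memory spaces off it
 * as bus resources, then walk the OF tree to populate the devices
 * beneath it.
 */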
778 struct pci_bus * __devinit pci_scan_one_pbm(struct pci_pbm_info *pbm)
779 {
780 struct device_node *node = pbm->prom_node;
781 struct pci_bus *bus;
782
783 printk("PCI: Scanning PBM %s\n", node->full_name);
784
785 /* XXX parent device? XXX */
786 bus = pci_create_bus(NULL, pbm->pci_first_busno, pbm->pci_ops, pbm);
787 if (!bus) {
788 printk(KERN_ERR "Failed to create bus for %s\n",
789 node->full_name);
790 return NULL;
791 }
792 bus->secondary = pbm->pci_first_busno;
793 bus->subordinate = pbm->pci_last_busno;
794
795 bus->resource[0] = &pbm->io_space;
796 bus->resource[1] = &pbm->mem_space;
797
798 pci_of_scan_bus(pbm, node, bus);
799 pci_bus_add_devices(bus);
800 pci_bus_register_of_sysfs(bus);
801
802 return bus;
803 }
804
805 static void __init pci_scan_each_controller_bus(void)
806 {
807 struct pci_pbm_info *pbm;
808
809 for (pbm = pci_pbm_root; pbm; pbm = pbm->next)
810 pbm->scan_bus(pbm);
811 }
812
813 extern void power_init(void);
814
815 static int __init pcibios_init(void)
816 {
817 pci_controller_probe();
818 if (pci_pbm_root == NULL)
819 return 0;
820
821 pci_scan_each_controller_bus();
822
823 ebus_init();
824 power_init();
825
826 return 0;
827 }
828
829 subsys_initcall(pcibios_init);
830
831 void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
832 {
833 struct pci_pbm_info *pbm = pbus->sysdata;
834
835 /* Generic PCI bus probing sets these to point at
836 	 * &io{port,mem}_resource which is wrong for us.
837 */
838 pbus->resource[0] = &pbm->io_space;
839 pbus->resource[1] = &pbm->mem_space;
840 }
841
842 struct resource *pcibios_select_root(struct pci_dev *pdev, struct resource *r)
843 {
844 struct pci_pbm_info *pbm = pdev->bus->sysdata;
845 struct resource *root = NULL;
846
847 if (r->flags & IORESOURCE_IO)
848 root = &pbm->io_space;
849 if (r->flags & IORESOURCE_MEM)
850 root = &pbm->mem_space;
851
852 return root;
853 }
854
855 void pcibios_update_irq(struct pci_dev *pdev, int irq)
856 {
857 }
858
859 void pcibios_align_resource(void *data, struct resource *res,
860 resource_size_t size, resource_size_t align)
861 {
862 }
863
864 int pcibios_enable_device(struct pci_dev *dev, int mask)
865 {
866 u16 cmd, oldcmd;
867 int i;
868
869 pci_read_config_word(dev, PCI_COMMAND, &cmd);
870 oldcmd = cmd;
871
872 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
873 struct resource *res = &dev->resource[i];
874
875 /* Only set up the requested stuff */
876 if (!(mask & (1<<i)))
877 continue;
878
879 if (res->flags & IORESOURCE_IO)
880 cmd |= PCI_COMMAND_IO;
881 if (res->flags & IORESOURCE_MEM)
882 cmd |= PCI_COMMAND_MEMORY;
883 }
884
885 if (cmd != oldcmd) {
886 printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
887 pci_name(dev), cmd);
888 /* Enable the appropriate bits in the PCI command register. */
889 pci_write_config_word(dev, PCI_COMMAND, cmd);
890 }
891 return 0;
892 }
893
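/* Translate between CPU physical resource values and the 32-bit
 * addresses seen on the PCI bus.  The offset of the PBM's space is
 * recovered by adjusting a zero-based dummy resource, then subtracted
 * from (or, in the bus_to_resource case below, added onto) the
 * caller's values.
 */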
894 void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region,
895 struct resource *res)
896 {
897 struct pci_pbm_info *pbm = pdev->bus->sysdata;
898 struct resource zero_res, *root;
899
900 zero_res.start = 0;
901 zero_res.end = 0;
902 zero_res.flags = res->flags;
903
904 if (res->flags & IORESOURCE_IO)
905 root = &pbm->io_space;
906 else
907 root = &pbm->mem_space;
908
909 pci_resource_adjust(&zero_res, root);
910
911 region->start = res->start - zero_res.start;
912 region->end = res->end - zero_res.start;
913 }
914 EXPORT_SYMBOL(pcibios_resource_to_bus);
915
916 void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
917 struct pci_bus_region *region)
918 {
919 struct pci_pbm_info *pbm = pdev->bus->sysdata;
920 struct resource *root;
921
922 res->start = region->start;
923 res->end = region->end;
924
925 if (res->flags & IORESOURCE_IO)
926 root = &pbm->io_space;
927 else
928 root = &pbm->mem_space;
929
930 pci_resource_adjust(res, root);
931 }
932 EXPORT_SYMBOL(pcibios_bus_to_resource);
933
934 char * __devinit pcibios_setup(char *str)
935 {
936 return str;
937 }
938
939 /* Platform support for /proc/bus/pci/X/Y mmap()s. */
940
941 /* If the user uses a host-bridge as the PCI device, they may use
942 * this to perform a raw mmap() of the I/O or MEM space behind
943 * that controller.
944 *
945 * This can be useful for execution of x86 PCI bios initialization code
946 * on a PCI card, like the xfree86 int10 stuff does.
947 */
948 static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
949 enum pci_mmap_state mmap_state)
950 {
951 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
952 unsigned long space_size, user_offset, user_size;
953
954 if (mmap_state == pci_mmap_io) {
955 space_size = (pbm->io_space.end -
956 pbm->io_space.start) + 1;
957 } else {
958 space_size = (pbm->mem_space.end -
959 pbm->mem_space.start) + 1;
960 }
961
962 /* Make sure the request is in range. */
963 user_offset = vma->vm_pgoff << PAGE_SHIFT;
964 user_size = vma->vm_end - vma->vm_start;
965
966 if (user_offset >= space_size ||
967 (user_offset + user_size) > space_size)
968 return -EINVAL;
969
970 if (mmap_state == pci_mmap_io) {
971 vma->vm_pgoff = (pbm->io_space.start +
972 user_offset) >> PAGE_SHIFT;
973 } else {
974 vma->vm_pgoff = (pbm->mem_space.start +
975 user_offset) >> PAGE_SHIFT;
976 }
977
978 return 0;
979 }
980
981 /* Adjust vm_pgoff of VMA such that it is the physical page offset
982 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
983 *
984  * Basically, the user finds the base address of the device they wish
985  * to mmap: they read the 32-bit value from the config space base register,
986 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
987 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
988 *
989 * Returns negative error code on failure, zero on success.
990 */
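/* Illustrative user-space sketch (the device path and BAR value below
 * are made up, not taken from a real machine):
 *
 *	int fd = open("/proc/bus/pci/00/01.0", O_RDWR);
 *	ioctl(fd, PCIIOC_MMAP_IS_MEM);		     // select MEM space
 *	void *p = mmap(NULL, 0x2000, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0x01000000);  // 32-bit BAR value
 */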
991 static int __pci_mmap_make_offset(struct pci_dev *pdev,
992 struct vm_area_struct *vma,
993 enum pci_mmap_state mmap_state)
994 {
995 unsigned long user_paddr, user_size;
996 int i, err;
997
998 /* First compute the physical address in vma->vm_pgoff,
999 * making sure the user offset is within range in the
1000 * appropriate PCI space.
1001 */
1002 err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
1003 if (err)
1004 return err;
1005
1006 /* If this is a mapping on a host bridge, any address
1007 * is OK.
1008 */
1009 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
1010 return err;
1011
1012 /* Otherwise make sure it's in the range for one of the
1013 * device's resources.
1014 */
1015 user_paddr = vma->vm_pgoff << PAGE_SHIFT;
1016 user_size = vma->vm_end - vma->vm_start;
1017
1018 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1019 struct resource *rp = &pdev->resource[i];
1020
1021 /* Active? */
1022 if (!rp->flags)
1023 continue;
1024
1025 /* Same type? */
1026 if (i == PCI_ROM_RESOURCE) {
1027 if (mmap_state != pci_mmap_mem)
1028 continue;
1029 } else {
1030 if ((mmap_state == pci_mmap_io &&
1031 (rp->flags & IORESOURCE_IO) == 0) ||
1032 (mmap_state == pci_mmap_mem &&
1033 (rp->flags & IORESOURCE_MEM) == 0))
1034 continue;
1035 }
1036
1037 if ((rp->start <= user_paddr) &&
1038 (user_paddr + user_size) <= (rp->end + 1UL))
1039 break;
1040 }
1041
1042 if (i > PCI_ROM_RESOURCE)
1043 return -EINVAL;
1044
1045 return 0;
1046 }
1047
1048 /* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
1049 * mapping.
1050 */
1051 static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
1052 enum pci_mmap_state mmap_state)
1053 {
1054 vma->vm_flags |= (VM_IO | VM_RESERVED);
1055 }
1056
1057 /* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
1058 * device mapping.
1059 */
1060 static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
1061 enum pci_mmap_state mmap_state)
1062 {
1063 /* Our io_remap_pfn_range takes care of this, do nothing. */
1064 }
1065
1066 /* Perform the actual remap of the pages for a PCI device mapping, as appropriate
1067 * for this architecture. The region in the process to map is described by vm_start
1068 * and vm_end members of VMA, the base physical address is found in vm_pgoff.
1069 * The pci device structure is provided so that architectures may make mapping
1070 * decisions on a per-device or per-bus basis.
1071 *
1072 * Returns a negative error code on failure, zero on success.
1073 */
1074 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1075 enum pci_mmap_state mmap_state,
1076 int write_combine)
1077 {
1078 int ret;
1079
1080 ret = __pci_mmap_make_offset(dev, vma, mmap_state);
1081 if (ret < 0)
1082 return ret;
1083
1084 __pci_mmap_set_flags(dev, vma, mmap_state);
1085 __pci_mmap_set_pgprot(dev, vma, mmap_state);
1086
1087 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1088 ret = io_remap_pfn_range(vma, vma->vm_start,
1089 vma->vm_pgoff,
1090 vma->vm_end - vma->vm_start,
1091 vma->vm_page_prot);
1092 if (ret)
1093 return ret;
1094
1095 return 0;
1096 }
1097
1098 #ifdef CONFIG_NUMA
1099 int pcibus_to_node(struct pci_bus *pbus)
1100 {
1101 struct pci_pbm_info *pbm = pbus->sysdata;
1102
1103 return pbm->numa_node;
1104 }
1105 EXPORT_SYMBOL(pcibus_to_node);
1106 #endif
1107
1108 /* Return the domain number for this PCI bus. */
1109
1110 int pci_domain_nr(struct pci_bus *pbus)
1111 {
1112 struct pci_pbm_info *pbm = pbus->sysdata;
1113 int ret;
1114
1115 if (pbm == NULL || pbm->parent == NULL) {
1116 ret = -ENXIO;
1117 } else {
1118 ret = pbm->index;
1119 }
1120
1121 return ret;
1122 }
1123 EXPORT_SYMBOL(pci_domain_nr);
1124
1125 #ifdef CONFIG_PCI_MSI
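/* MSI setup and teardown are delegated to the controller driver via
 * the PBM's setup_msi_irq/teardown_msi_irq hooks; controllers without
 * MSI support simply leave them NULL.
 */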
1126 int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
1127 {
1128 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
1129 int virt_irq;
1130
1131 if (!pbm->setup_msi_irq)
1132 return -EINVAL;
1133
1134 return pbm->setup_msi_irq(&virt_irq, pdev, desc);
1135 }
1136
1137 void arch_teardown_msi_irq(unsigned int virt_irq)
1138 {
1139 struct msi_desc *entry = get_irq_msi(virt_irq);
1140 struct pci_dev *pdev = entry->dev;
1141 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
1142
1143 if (!pbm->teardown_msi_irq)
1144 return;
1145
1146 return pbm->teardown_msi_irq(virt_irq, pdev);
1147 }
1148 #endif /* CONFIG_PCI_MSI */
1149
1150 struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
1151 {
1152 return pdev->dev.archdata.prom_node;
1153 }
1154 EXPORT_SYMBOL(pci_device_to_OF_node);
1155
1156 static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
1157 {
1158 struct pci_dev *ali_isa_bridge;
1159 u8 val;
1160
1161 	/* ALI sound chips generate 31 bits of DMA addressing; a special
1162 	 * register in the M1533 ISA bridge determines what bit 31 is
1163 	 * emitted as.
1163 */
1164 ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
1165 PCI_DEVICE_ID_AL_M1533,
1166 NULL);
1167
1168 pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
1169 if (set_bit)
1170 val |= 0x01;
1171 else
1172 val &= ~0x01;
1173 pci_write_config_byte(ali_isa_bridge, 0x7e, val);
1174 pci_dev_put(ali_isa_bridge);
1175 }
1176
1177 int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
1178 {
1179 u64 dma_addr_mask;
1180
1181 if (pdev == NULL) {
1182 dma_addr_mask = 0xffffffff;
1183 } else {
1184 struct iommu *iommu = pdev->dev.archdata.iommu;
1185
1186 dma_addr_mask = iommu->dma_addr_mask;
1187
1188 if (pdev->vendor == PCI_VENDOR_ID_AL &&
1189 pdev->device == PCI_DEVICE_ID_AL_M5451 &&
1190 device_mask == 0x7fffffff) {
1191 ali_sound_dma_hack(pdev,
1192 (dma_addr_mask & 0x80000000) != 0);
1193 return 1;
1194 }
1195 }
1196
1197 if (device_mask >= (1UL << 32UL))
1198 return 0;
1199
1200 return (device_mask & dma_addr_mask) == dma_addr_mask;
1201 }
1202
1203 void pci_resource_to_user(const struct pci_dev *pdev, int bar,
1204 const struct resource *rp, resource_size_t *start,
1205 resource_size_t *end)
1206 {
1207 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
1208 unsigned long offset;
1209
1210 if (rp->flags & IORESOURCE_IO)
1211 offset = pbm->io_space.start;
1212 else
1213 offset = pbm->mem_space.start;
1214
1215 *start = rp->start - offset;
1216 *end = rp->end - offset;
1217 }
1218
1219 #endif /* !(CONFIG_PCI) */