sparc64: Convert SUN4V PCI controller driver into a real driver.
[deliverable/linux.git] / arch / sparc64 / kernel / pci.c
1 /* pci.c: UltraSparc PCI controller support.
2 *
3 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
4 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
5 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
6 *
7 * OF tree based PCI bus probing taken from the PowerPC port
8 * with minor modifications, see there for credits.
9 */
10
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/string.h>
14 #include <linux/sched.h>
15 #include <linux/capability.h>
16 #include <linux/errno.h>
17 #include <linux/pci.h>
18 #include <linux/msi.h>
19 #include <linux/irq.h>
20 #include <linux/init.h>
21 #include <linux/of.h>
22 #include <linux/of_device.h>
23
24 #include <asm/uaccess.h>
25 #include <asm/pgtable.h>
26 #include <asm/irq.h>
27 #include <asm/prom.h>
28 #include <asm/apb.h>
29
30 #include "pci_impl.h"
31
/* List of all PCI controllers found in the system. */
struct pci_pbm_info *pci_pbm_root = NULL;

/* Each PBM found gets a unique index. */
int pci_num_pbms = 0;

/* State shared with fault handling code (outside this file) so that a
 * trap taken while poking PCI config space -- e.g. a master abort from
 * probing a non-present device -- can be recognized and recovered from.
 * pci_poke_faulted is raised by that code when the access traps.
 * All of this state is only manipulated under pci_poke_lock.
 */
volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;

static DEFINE_SPINLOCK(pci_poke_lock);
43
/* Read one byte of PCI config space at physical address ADDR into *RET.
 *
 * The load is done with ASI_PHYS_BYPASS_EC_E_L (little-endian physical
 * bypass) between two membar #Sync barriers, under pci_poke_lock with
 * the pci_poke_* state armed so a fault on the access is survivable.
 * If pci_poke_faulted was raised during the access, *RET is left
 * untouched.
 */
void pci_config_read8(u8 *addr, u8 *ret)
{
	unsigned long flags;
	u8 byte;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduba [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (byte)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = byte;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
65
/* Read a 16-bit word of PCI config space at physical address ADDR into
 * *RET, using the same fault-survivable poke protocol as
 * pci_config_read8().  *RET is unchanged if the access faulted.
 */
void pci_config_read16(u16 *addr, u16 *ret)
{
	unsigned long flags;
	u16 word;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduha [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (word)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = word;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
87
/* Read a 32-bit word of PCI config space at physical address ADDR into
 * *RET, using the same fault-survivable poke protocol as
 * pci_config_read8().  *RET is unchanged if the access faulted.
 */
void pci_config_read32(u32 *addr, u32 *ret)
{
	unsigned long flags;
	u32 dword;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduwa [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (dword)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = dword;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
109
/* Write byte VAL to PCI config space at physical address ADDR under the
 * fault-survivable poke protocol.  A faulted store is silently dropped.
 */
void pci_config_write8(u8 *addr, u8 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stba %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
128
/* Write 16-bit VAL to PCI config space at physical address ADDR under
 * the fault-survivable poke protocol.  A faulted store is silently
 * dropped.
 */
void pci_config_write16(u16 *addr, u16 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stha %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
147
/* Write 32-bit VAL to PCI config space at physical address ADDR under
 * the fault-survivable poke protocol.  A faulted store is silently
 * dropped.
 */
void pci_config_write32(u32 *addr, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stwa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
166
/* Probe for all PCI controllers in the system. */
extern void sabre_init(struct device_node *, const char *);
extern void psycho_init(struct device_node *, const char *);
extern void fire_pci_init(struct device_node *, const char *);

/* Table mapping the OF "model"/"compatible" string of a controller
 * node to the init routine for that controller type.  Lives in
 * __initdata since probing happens exactly once at boot.
 */
static struct {
	char *model_name;
	void (*init)(struct device_node *, const char *);
} pci_controller_table[] __initdata = {
	{ "SUNW,sabre", sabre_init },
	{ "pci108e,a000", sabre_init },
	{ "pci108e,a001", sabre_init },
	{ "SUNW,psycho", psycho_init },
	{ "pci108e,8000", psycho_init },
	{ "pciex108e,80f0", fire_pci_init },
};
#define PCI_NUM_CONTROLLER_TYPES ARRAY_SIZE(pci_controller_table)
184
185 static int __init pci_controller_init(const char *model_name, int namelen, struct device_node *dp)
186 {
187 int i;
188
189 for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
190 if (!strncmp(model_name,
191 pci_controller_table[i].model_name,
192 namelen)) {
193 pci_controller_table[i].init(dp, model_name);
194 return 1;
195 }
196 }
197
198 return 0;
199 }
200
201 static int __init pci_controller_scan(int (*handler)(const char *, int, struct device_node *))
202 {
203 struct device_node *dp;
204 int count = 0;
205
206 for_each_node_by_name(dp, "pci") {
207 struct property *prop;
208 int len;
209
210 prop = of_find_property(dp, "model", &len);
211 if (!prop)
212 prop = of_find_property(dp, "compatible", &len);
213
214 if (prop) {
215 const char *model = prop->value;
216 int item_len = 0;
217
218 /* Our value may be a multi-valued string in the
219 * case of some compatible properties. For sanity,
220 * only try the first one.
221 */
222 while (model[item_len] && len) {
223 len--;
224 item_len++;
225 }
226
227 if (handler(model, item_len, dp))
228 count++;
229 }
230 }
231
232 return count;
233 }
234
235 /* Find each controller in the system, attach and initialize
236 * software state structure for each and link into the
237 * pci_pbm_root. Setup the controller enough such
238 * that bus scanning can be done.
239 */
240 static void __init pci_controller_probe(void)
241 {
242 printk("PCI: Probing for controllers.\n");
243
244 pci_controller_scan(pci_controller_init);
245 }
246
247 static int ofpci_verbose;
248
249 static int __init ofpci_debug(char *str)
250 {
251 int val = 0;
252
253 get_option(&str, &val);
254 if (val)
255 ofpci_verbose = 1;
256 return 1;
257 }
258
259 __setup("ofpci_debug=", ofpci_debug);
260
261 static unsigned long pci_parse_of_flags(u32 addr0)
262 {
263 unsigned long flags = 0;
264
265 if (addr0 & 0x02000000) {
266 flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
267 flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
268 flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
269 if (addr0 & 0x40000000)
270 flags |= IORESOURCE_PREFETCH
271 | PCI_BASE_ADDRESS_MEM_PREFETCH;
272 } else if (addr0 & 0x01000000)
273 flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
274 return flags;
275 }
276
/* The of_device layer has translated all of the assigned-address properties
 * into physical address resources, we only have to figure out the register
 * mapping.
 *
 * Each "assigned-addresses" entry is 5 cells wide (phys.hi plus 64-bit
 * address and 64-bit size); the low byte of phys.hi names the config
 * space register (BAR or expansion ROM) the entry belongs to.
 */
static void pci_parse_of_addrs(struct of_device *op,
			       struct device_node *node,
			       struct pci_dev *dev)
{
	struct resource *op_res;
	const u32 *addrs;
	int proplen;

	addrs = of_get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	if (ofpci_verbose)
		printk(" parse addresses (%d bytes) @ %p\n",
		       proplen, addrs);
	op_res = &op->resource[0];
	/* Walk entries in lockstep with the of_device resources. */
	for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
		struct resource *res;
		unsigned long flags;
		int i;

		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;
		/* Config register number from the low phys.hi byte. */
		i = addrs[0] & 0xff;
		if (ofpci_verbose)
			printk(" start: %lx, end: %lx, i: %x\n",
			       op_res->start, op_res->end, i);

		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			/* BARs are 4 bytes apart, hence the >> 2. */
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
		} else {
			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = op_res->start;
		res->end = op_res->end;
		res->flags = flags;
		res->name = pci_name(dev);
	}
}
324
/* Build a struct pci_dev for OF node NODE (device/function DEVFN on
 * BUS, behind controller PBM) from device tree properties plus a
 * direct read of the class/revision register, then register it with
 * the PCI core.  Returns the new device or NULL on allocation failure.
 */
struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
				  struct device_node *node,
				  struct pci_bus *bus, int devfn)
{
	struct dev_archdata *sd;
	struct of_device *op;
	struct pci_dev *dev;
	const char *type;
	u32 class;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;

	/* Wire the pci_dev's archdata up to the controller state and
	 * the matching of_device.
	 */
	sd = &dev->dev.archdata;
	sd->iommu = pbm->iommu;
	sd->stc = &pbm->stc;
	sd->host_controller = pbm;
	sd->prom_node = node;
	sd->op = op = of_find_device_by_node(node);
	sd->numa_node = pbm->numa_node;

	/* Mirror the IOMMU/streaming-cache/NUMA info into the
	 * of_device's archdata as well; sd points at op's archdata
	 * from here on.
	 */
	sd = &op->dev.archdata;
	sd->iommu = pbm->iommu;
	sd->stc = &pbm->stc;
	sd->numa_node = pbm->numa_node;

	if (!strcmp(node->name, "ebus"))
		of_propagate_archdata(op);

	type = of_get_property(node, "device_type", NULL);
	if (type == NULL)
		type = "";

	if (ofpci_verbose)
		printk(" create device, devfn: %x, type: %s\n",
		       devfn, type);

	dev->bus = bus;
	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->multifunction = 0; /* maybe a lie? */

	/* IDs come from the firmware properties, not config space. */
	dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
	dev->device = of_getintprop_default(node, "device-id", 0xffff);
	dev->subsystem_vendor =
		of_getintprop_default(node, "subsystem-vendor-id", 0);
	dev->subsystem_device =
		of_getintprop_default(node, "subsystem-id", 0);

	dev->cfg_size = pci_cfg_space_size(dev);

	/* We can't actually use the firmware value, we have
	 * to read what is in the register right now. One
	 * reason is that in the case of IDE interfaces the
	 * firmware can sample the value before the IDE
	 * interface is programmed into native mode.
	 */
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->class = class >> 8;
	dev->revision = class & 0xff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		     dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));

	if (ofpci_verbose)
		printk(" class: 0x%x device name: %s\n",
		       dev->class, pci_name(dev));

	/* I have seen IDE devices which will not respond to
	 * the bmdma simplex check reads if bus mastering is
	 * disabled.
	 */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
		pci_set_master(dev);

	dev->current_state = 4; /* unknown power state */
	dev->error_state = pci_channel_io_normal;

	if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (!strcmp(type, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;

		/* IRQ as resolved by the of_device layer.
		 * NOTE(review): sd points at op->dev.archdata here and
		 * its ->op field is assumed valid -- confirm it is set
		 * by of_device creation.
		 */
		dev->irq = sd->op->irqs[0];
		if (dev->irq == 0xffffffff)
			dev->irq = PCI_IRQ_NONE;
	}

	pci_parse_of_addrs(sd->op, node, dev);

	if (ofpci_verbose)
		printk(" adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}
430
431 static void __devinit apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
432 {
433 u32 idx, first, last;
434
435 first = 8;
436 last = 0;
437 for (idx = 0; idx < 8; idx++) {
438 if ((map & (1 << idx)) != 0) {
439 if (first > idx)
440 first = idx;
441 if (last < idx)
442 last = idx;
443 }
444 }
445
446 *first_p = first;
447 *last_p = last;
448 }
449
450 static void pci_resource_adjust(struct resource *res,
451 struct resource *root)
452 {
453 res->start += root->start;
454 res->end += root->start;
455 }
456
/* For PCI bus devices which lack a 'ranges' property we interrogate
 * the config space values to set the resources, just like the generic
 * Linux PCI probing code does.
 *
 * The bridge's I/O, memory and prefetchable memory windows are read
 * from the standard type-1 header registers; each window found with
 * base <= limit becomes the corresponding bus resource, rebased into
 * the controller's aperture via pci_resource_adjust().
 */
static void __devinit pci_cfg_fake_ranges(struct pci_dev *dev,
					  struct pci_bus *bus,
					  struct pci_pbm_info *pbm)
{
	struct resource *res;
	u8 io_base_lo, io_limit_lo;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;

	/* I/O window: low byte gives bits 15:12 of the range. */
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		/* 32-bit I/O decoding: pick up the upper 16 bits. */
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);
	}

	res = bus->resource[0];
	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		if (!res->start)
			res->start = base;
		if (!res->end)
			res->end = limit + 0xfff;	/* 4K I/O granularity */
		pci_resource_adjust(res, &pbm->io_space);
	}

	/* Non-prefetchable memory window (1MB granularity). */
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;

	res = bus->resource[1];
	if (base <= limit) {
		res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) |
			      IORESOURCE_MEM);
		res->start = base;
		res->end = limit + 0xfffff;
		pci_resource_adjust(res, &pbm->mem_space);
	}

	/* Prefetchable memory window, possibly 64-bit. */
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them. If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base |= ((long) mem_base_hi) << 32;
			limit |= ((long) mem_limit_hi) << 32;
		}
	}

	res = bus->resource[2];
	if (base <= limit) {
		res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) |
			      IORESOURCE_MEM | IORESOURCE_PREFETCH);
		res->start = base;
		res->end = limit + 0xfffff;
		pci_resource_adjust(res, &pbm->mem_space);
	}
}
539
540 /* Cook up fake bus resources for SUNW,simba PCI bridges which lack
541 * a proper 'ranges' property.
542 */
543 static void __devinit apb_fake_ranges(struct pci_dev *dev,
544 struct pci_bus *bus,
545 struct pci_pbm_info *pbm)
546 {
547 struct resource *res;
548 u32 first, last;
549 u8 map;
550
551 pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
552 apb_calc_first_last(map, &first, &last);
553 res = bus->resource[0];
554 res->start = (first << 21);
555 res->end = (last << 21) + ((1 << 21) - 1);
556 res->flags = IORESOURCE_IO;
557 pci_resource_adjust(res, &pbm->io_space);
558
559 pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
560 apb_calc_first_last(map, &first, &last);
561 res = bus->resource[1];
562 res->start = (first << 21);
563 res->end = (last << 21) + ((1 << 21) - 1);
564 res->flags = IORESOURCE_MEM;
565 pci_resource_adjust(res, &pbm->mem_space);
566 }
567
static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
				      struct device_node *node,
				      struct pci_bus *bus);

/* Join two consecutive 32-bit property cells into one 64-bit value. */
#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])

/* Create and populate the secondary pci_bus behind bridge DEV (OF node
 * NODE).  Bus resources come from the OF "ranges" property when it
 * exists; for SUNW,simba bridges they are built from the APB map
 * registers, and otherwise from the bridge's config-space windows.
 * Finally the new bus is scanned for child devices.
 */
static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
					 struct device_node *node,
					 struct pci_dev *dev)
{
	struct pci_bus *bus;
	const u32 *busrange, *ranges;
	int len, i, simba;
	struct resource *res;
	unsigned int flags;
	u64 size;

	if (ofpci_verbose)
		printk("of_scan_pci_bridge(%s)\n", node->full_name);

	/* parse bus-range property */
	busrange = of_get_property(node, "bus-range", &len);
	if (busrange == NULL || len != 8) {
		printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}
	ranges = of_get_property(node, "ranges", &len);
	simba = 0;
	if (ranges == NULL) {
		const char *model = of_get_property(node, "model", NULL);
		if (model && !strcmp(model, "SUNW,simba"))
			simba = 1;
	}

	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
	if (!bus) {
		printk(KERN_ERR "Failed to create pci bus for %s\n",
		       node->full_name);
		return;
	}

	bus->primary = dev->bus->number;
	bus->subordinate = busrange[1];
	bus->bridge_ctl = 0;

	/* parse ranges property, or cook one up by hand for Simba */
	/* PCI #address-cells == 3 and #size-cells == 2 always */
	res = &dev->resource[PCI_BRIDGE_RESOURCES];
	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
		res->flags = 0;
		bus->resource[i] = res;
		++res;
	}
	if (simba) {
		apb_fake_ranges(dev, bus, pbm);
		goto after_ranges;
	} else if (ranges == NULL) {
		pci_cfg_fake_ranges(dev, bus, pbm);
		goto after_ranges;
	}
	/* Slot 0 is reserved for I/O; memory ranges start at 1. */
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		struct resource *root;

		flags = pci_parse_of_flags(ranges[0]);
		size = GET_64BIT(ranges, 6);
		if (flags == 0 || size == 0)
			continue;
		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			root = &pbm->io_space;
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
			root = &pbm->mem_space;
		}

		res->start = GET_64BIT(ranges, 1);
		res->end = res->start + size - 1;
		res->flags = flags;

		/* Another way to implement this would be to add an of_device
		 * layer routine that can calculate a resource for a given
		 * range property value in a PCI device.
		 */
		pci_resource_adjust(res, root);
	}
after_ranges:
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	if (ofpci_verbose)
		printk(" bus name: %s\n", bus->name);

	pci_of_scan_bus(pbm, node, bus);
}
674
/* Turn every child OF node of NODE into a pci_dev on BUS and recurse
 * into any bridge devices found along the way.
 */
static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
				      struct device_node *node,
				      struct pci_bus *bus)
{
	struct device_node *child;
	const u32 *reg;
	int reglen, devfn, prev_devfn;
	struct pci_dev *dev;

	if (ofpci_verbose)
		printk("PCI: scan_bus[%s] bus no %d\n",
		       node->full_name, bus->number);

	child = NULL;
	prev_devfn = -1;
	while ((child = of_get_next_child(node, child)) != NULL) {
		if (ofpci_verbose)
			printk(" * %s\n", child->full_name);
		reg = of_get_property(child, "reg", &reglen);
		if (reg == NULL || reglen < 20)
			continue;

		/* devfn lives in bits 15:8 of the phys.hi cell. */
		devfn = (reg[0] >> 8) & 0xff;

		/* This is a workaround for some device trees
		 * which list PCI devices twice. On the V100
		 * for example, device number 3 is listed twice.
		 * Once as "pm" and once again as "lomp".
		 */
		if (devfn == prev_devfn)
			continue;
		prev_devfn = devfn;

		/* create a new pci_dev for this device */
		dev = of_create_pci_dev(pbm, child, bus, devfn);
		if (!dev)
			continue;
		if (ofpci_verbose)
			printk("PCI: dev header type: %x\n",
			       dev->hdr_type);

		/* Descend into bridges to pick up the buses behind them. */
		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
			of_scan_pci_bridge(pbm, child, dev);
	}
}
721
722 static ssize_t
723 show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
724 {
725 struct pci_dev *pdev;
726 struct device_node *dp;
727
728 pdev = to_pci_dev(dev);
729 dp = pdev->dev.archdata.prom_node;
730
731 return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
732 }
733
734 static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
735
/* Recursively create the "obppath" sysfs attribute for every device on
 * BUS and on all of its child buses.
 */
static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;
	int err;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Failing to create this file is harmless, but the
		 * return value must be assigned somewhere to satisfy
		 * warn_unused_result checking.
		 */
		err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
	}
	list_for_each_entry(child_bus, &bus->children, node)
		pci_bus_register_of_sysfs(child_bus);
}
755
/* Scan one PBM (controller bus segment): create its root pci_bus,
 * attach the controller's I/O and memory apertures as bus resources,
 * build pci_dev's from the OF tree, register them, and create the OBP
 * path sysfs files.  Returns the root bus or NULL on failure.
 */
struct pci_bus * __devinit pci_scan_one_pbm(struct pci_pbm_info *pbm)
{
	struct device_node *node = pbm->prom_node;
	struct pci_bus *bus;

	printk("PCI: Scanning PBM %s\n", node->full_name);

	/* XXX parent device? XXX */
	bus = pci_create_bus(NULL, pbm->pci_first_busno, pbm->pci_ops, pbm);
	if (!bus) {
		printk(KERN_ERR "Failed to create bus for %s\n",
		       node->full_name);
		return NULL;
	}
	bus->secondary = pbm->pci_first_busno;
	bus->subordinate = pbm->pci_last_busno;

	bus->resource[0] = &pbm->io_space;
	bus->resource[1] = &pbm->mem_space;

	pci_of_scan_bus(pbm, node, bus);
	pci_bus_add_devices(bus);
	pci_bus_register_of_sysfs(bus);

	return bus;
}
782
783 static void __init pci_scan_each_controller_bus(void)
784 {
785 struct pci_pbm_info *pbm;
786
787 for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
788 if (pbm->scan_bus)
789 pbm->scan_bus(pbm);
790 }
791 }
792
793 static int __init pcibios_init(void)
794 {
795 pci_controller_probe();
796 if (pci_pbm_root == NULL)
797 return 0;
798
799 pci_scan_each_controller_bus();
800
801 return 0;
802 }
803
804 subsys_initcall(pcibios_init);
805
/* Hook called by the generic PCI layer for each scanned bus. */
void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;

	/* Generic PCI bus probing sets these to point at
	 * &io{port,mem}_resource which is wrong for us;
	 * use the controller's own apertures instead.
	 */
	pbus->resource[0] = &pbm->io_space;
	pbus->resource[1] = &pbm->mem_space;
}
816
817 struct resource *pcibios_select_root(struct pci_dev *pdev, struct resource *r)
818 {
819 struct pci_pbm_info *pbm = pdev->bus->sysdata;
820 struct resource *root = NULL;
821
822 if (r->flags & IORESOURCE_IO)
823 root = &pbm->io_space;
824 if (r->flags & IORESOURCE_MEM)
825 root = &pbm->mem_space;
826
827 return root;
828 }
829
/* Intentionally a no-op on sparc64. */
void pcibios_update_irq(struct pci_dev *pdev, int irq)
{
}

/* Intentionally a no-op on sparc64. */
void pcibios_align_resource(void *data, struct resource *res,
			    resource_size_t size, resource_size_t align)
{
}
838
839 int pcibios_enable_device(struct pci_dev *dev, int mask)
840 {
841 u16 cmd, oldcmd;
842 int i;
843
844 pci_read_config_word(dev, PCI_COMMAND, &cmd);
845 oldcmd = cmd;
846
847 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
848 struct resource *res = &dev->resource[i];
849
850 /* Only set up the requested stuff */
851 if (!(mask & (1<<i)))
852 continue;
853
854 if (res->flags & IORESOURCE_IO)
855 cmd |= PCI_COMMAND_IO;
856 if (res->flags & IORESOURCE_MEM)
857 cmd |= PCI_COMMAND_MEMORY;
858 }
859
860 if (cmd != oldcmd) {
861 printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
862 pci_name(dev), cmd);
863 /* Enable the appropriate bits in the PCI command register. */
864 pci_write_config_word(dev, PCI_COMMAND, cmd);
865 }
866 return 0;
867 }
868
/* Translate physical resource RES of PDEV into the bus-relative REGION.
 * The controller aperture base is recovered by adjusting a zero-valued
 * resource, then subtracted from RES's endpoints.
 */
void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region,
			     struct resource *res)
{
	struct pci_pbm_info *pbm = pdev->bus->sysdata;
	struct resource zero_res, *root;

	zero_res.start = 0;
	zero_res.end = 0;
	zero_res.flags = res->flags;

	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else
		root = &pbm->mem_space;

	/* After this, zero_res.start holds the aperture base. */
	pci_resource_adjust(&zero_res, root);

	region->start = res->start - zero_res.start;
	region->end = res->end - zero_res.start;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);
890
891 void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
892 struct pci_bus_region *region)
893 {
894 struct pci_pbm_info *pbm = pdev->bus->sysdata;
895 struct resource *root;
896
897 res->start = region->start;
898 res->end = region->end;
899
900 if (res->flags & IORESOURCE_IO)
901 root = &pbm->io_space;
902 else
903 root = &pbm->mem_space;
904
905 pci_resource_adjust(res, root);
906 }
907 EXPORT_SYMBOL(pcibios_bus_to_resource);
908
/* No sparc64 specific "pci=" parameters; hand everything back to the
 * generic code unconsumed.
 */
char * __devinit pcibios_setup(char *str)
{
	return str;
}
913
914 /* Platform support for /proc/bus/pci/X/Y mmap()s. */
915
916 /* If the user uses a host-bridge as the PCI device, he may use
917 * this to perform a raw mmap() of the I/O or MEM space behind
918 * that controller.
919 *
920 * This can be useful for execution of x86 PCI bios initialization code
921 * on a PCI card, like the xfree86 int10 stuff does.
922 */
923 static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
924 enum pci_mmap_state mmap_state)
925 {
926 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
927 unsigned long space_size, user_offset, user_size;
928
929 if (mmap_state == pci_mmap_io) {
930 space_size = (pbm->io_space.end -
931 pbm->io_space.start) + 1;
932 } else {
933 space_size = (pbm->mem_space.end -
934 pbm->mem_space.start) + 1;
935 }
936
937 /* Make sure the request is in range. */
938 user_offset = vma->vm_pgoff << PAGE_SHIFT;
939 user_size = vma->vm_end - vma->vm_start;
940
941 if (user_offset >= space_size ||
942 (user_offset + user_size) > space_size)
943 return -EINVAL;
944
945 if (mmap_state == pci_mmap_io) {
946 vma->vm_pgoff = (pbm->io_space.start +
947 user_offset) >> PAGE_SHIFT;
948 } else {
949 vma->vm_pgoff = (pbm->mem_space.start +
950 user_offset) >> PAGE_SHIFT;
951 }
952
953 return 0;
954 }
955
/* Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap. They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static int __pci_mmap_make_offset(struct pci_dev *pdev,
				  struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	unsigned long user_paddr, user_size;
	int i, err;

	/* First compute the physical address in vma->vm_pgoff,
	 * making sure the user offset is within range in the
	 * appropriate PCI space.
	 */
	err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
	if (err)
		return err;

	/* If this is a mapping on a host bridge, any address
	 * is OK.
	 */
	if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
		return err;

	/* Otherwise make sure it's in the range for one of the
	 * device's resources.
	 */
	user_paddr = vma->vm_pgoff << PAGE_SHIFT;
	user_size = vma->vm_end - vma->vm_start;

	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &pdev->resource[i];

		/* Active? */
		if (!rp->flags)
			continue;

		/* Same type?  The ROM is only mappable as memory. */
		if (i == PCI_ROM_RESOURCE) {
			if (mmap_state != pci_mmap_mem)
				continue;
		} else {
			if ((mmap_state == pci_mmap_io &&
			     (rp->flags & IORESOURCE_IO) == 0) ||
			    (mmap_state == pci_mmap_mem &&
			     (rp->flags & IORESOURCE_MEM) == 0))
				continue;
		}

		/* Request fully contained in this resource? */
		if ((rp->start <= user_paddr) &&
		    (user_paddr + user_size) <= (rp->end + 1UL))
			break;
	}

	/* No resource matched the requested range. */
	if (i > PCI_ROM_RESOURCE)
		return -EINVAL;

	return 0;
}
1022
/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
 * mapping.
 */
static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
				 enum pci_mmap_state mmap_state)
{
	/* Mark the region as device I/O memory. */
	vma->vm_flags |= (VM_IO | VM_RESERVED);
}

/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	/* Our io_remap_pfn_range takes care of this, do nothing. */
}
1040
1041 /* Perform the actual remap of the pages for a PCI device mapping, as appropriate
1042 * for this architecture. The region in the process to map is described by vm_start
1043 * and vm_end members of VMA, the base physical address is found in vm_pgoff.
1044 * The pci device structure is provided so that architectures may make mapping
1045 * decisions on a per-device or per-bus basis.
1046 *
1047 * Returns a negative error code on failure, zero on success.
1048 */
1049 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1050 enum pci_mmap_state mmap_state,
1051 int write_combine)
1052 {
1053 int ret;
1054
1055 ret = __pci_mmap_make_offset(dev, vma, mmap_state);
1056 if (ret < 0)
1057 return ret;
1058
1059 __pci_mmap_set_flags(dev, vma, mmap_state);
1060 __pci_mmap_set_pgprot(dev, vma, mmap_state);
1061
1062 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1063 ret = io_remap_pfn_range(vma, vma->vm_start,
1064 vma->vm_pgoff,
1065 vma->vm_end - vma->vm_start,
1066 vma->vm_page_prot);
1067 if (ret)
1068 return ret;
1069
1070 return 0;
1071 }
1072
#ifdef CONFIG_NUMA
/* Report the NUMA node recorded for the bus's controller. */
int pcibus_to_node(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;

	return pbm->numa_node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
1082
/* Return the domain number for this pci bus */

1085 int pci_domain_nr(struct pci_bus *pbus)
1086 {
1087 struct pci_pbm_info *pbm = pbus->sysdata;
1088 int ret;
1089
1090 if (pbm == NULL || pbm->parent == NULL) {
1091 ret = -ENXIO;
1092 } else {
1093 ret = pbm->index;
1094 }
1095
1096 return ret;
1097 }
1098 EXPORT_SYMBOL(pci_domain_nr);
1099
#ifdef CONFIG_PCI_MSI
/* Arch hook for MSI setup: delegate to the controller's (PBM's)
 * setup_msi_irq method.  Returns -EINVAL when the controller has no
 * MSI support.
 */
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	int virt_irq;

	if (!pbm->setup_msi_irq)
		return -EINVAL;

	return pbm->setup_msi_irq(&virt_irq, pdev, desc);
}

/* Arch hook for MSI teardown: delegate to the controller's
 * teardown_msi_irq method, if any.
 */
void arch_teardown_msi_irq(unsigned int virt_irq)
{
	struct msi_desc *entry = get_irq_msi(virt_irq);
	struct pci_dev *pdev = entry->dev;
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;

	if (!pbm->teardown_msi_irq)
		return;

	return pbm->teardown_msi_irq(virt_irq, pdev);
}
#endif /* CONFIG_PCI_MSI */
1124
/* Return the OF device tree node associated with PDEV. */
struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
{
	return pdev->dev.archdata.prom_node;
}
EXPORT_SYMBOL(pci_device_to_OF_node);
1130
1131 static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
1132 {
1133 struct pci_dev *ali_isa_bridge;
1134 u8 val;
1135
1136 /* ALI sound chips generate 31-bits of DMA, a special register
1137 * determines what bit 31 is emitted as.
1138 */
1139 ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
1140 PCI_DEVICE_ID_AL_M1533,
1141 NULL);
1142
1143 pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
1144 if (set_bit)
1145 val |= 0x01;
1146 else
1147 val &= ~0x01;
1148 pci_write_config_byte(ali_isa_bridge, 0x7e, val);
1149 pci_dev_put(ali_isa_bridge);
1150 }
1151
/* Return non-zero if the device PDEV can DMA using addresses covered
 * by DEVICE_MASK.  A NULL PDEV is checked against a plain 32-bit mask;
 * otherwise the controller IOMMU's DMA address mask is consulted.
 */
int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		struct iommu *iommu = pdev->dev.archdata.iommu;

		dma_addr_mask = iommu->dma_addr_mask;

		/* ALI M5451 sound chip special case: it emits 31 DMA
		 * address bits plus a configurable bit 31, so program
		 * that bit to match the IOMMU mask and claim support.
		 */
		if (pdev->vendor == PCI_VENDOR_ID_AL &&
		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
		    device_mask == 0x7fffffff) {
			ali_sound_dma_hack(pdev,
					   (dma_addr_mask & 0x80000000) != 0);
			return 1;
		}
	}

	/* Masks wider than 32 bits are never supported here. */
	if (device_mask >= (1UL << 32UL))
		return 0;

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}
1177
1178 void pci_resource_to_user(const struct pci_dev *pdev, int bar,
1179 const struct resource *rp, resource_size_t *start,
1180 resource_size_t *end)
1181 {
1182 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
1183 unsigned long offset;
1184
1185 if (rp->flags & IORESOURCE_IO)
1186 offset = pbm->io_space.start;
1187 else
1188 offset = pbm->mem_space.start;
1189
1190 *start = rp->start - offset;
1191 *end = rp->end - offset;
1192 }
This page took 0.093932 seconds and 5 git commands to generate.