/*
 * Contains common pci routines for ALL ppc platforms
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 * Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>

/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* For dynamic PHB numbering on get_phb_number(): max number of PHBs. */
#define MAX_PHBS 0x10000

/*
 * For dynamic PHB numbering: used/free PHBs tracking bitmap.
 * Accesses to this bitmap should be protected by hose_spinlock.
 */
static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);

/* ISA Memory physical address */
resource_size_t isa_mem_base;


static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

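/*
 * Usage sketch (hypothetical platform code, not part of this file): a
 * platform with an IOMMU would typically install its own ops during
 * early setup, before PCI devices are probed, e.g.:
 *
 *	set_pci_dma_ops(&dma_iommu_ops);
 *
 * Devices probed afterwards pick these ops up in pcibios_setup_device().
 */
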
/*
 * This function should run under locking protection, specifically
 * hose_spinlock.
 */
static int get_phb_number(struct device_node *dn)
{
	int ret, phb_id = -1;
	u64 prop;

	/*
	 * Try fixed PHB numbering first, by checking archs and reading
	 * the respective device-tree properties. Firstly, try powernv by
	 * reading "ibm,opal-phbid", only present in OPAL environment.
	 */
	ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
	if (ret)
		ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop);

	if (!ret)
		phb_id = (int)(prop & (MAX_PHBS - 1));

	/* We need to be sure to not use the same PHB number twice. */
	if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
		return phb_id;

	/*
	 * If not pseries nor powernv, or if fixed PHB numbering tried to add
	 * the same PHB number twice, then fall back to dynamic PHB numbering.
	 */
	phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
	BUG_ON(phb_id >= MAX_PHBS);
	set_bit(phb_id, phb_bitmap);

	return phb_id;
}

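/*
 * For illustration (values are made up): on a powernv system the PHB
 * node carries the OPAL id directly, e.g.
 *
 *	pciex@3fffe40000000 {
 *		ibm,opal-phbid = <0x0 0x1>;
 *		...
 *	};
 *
 * in which case get_phb_number() yields 1 without touching the
 * dynamically numbered part of phb_bitmap.
 */
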
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = get_phb_number(dev);
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
EXPORT_SYMBOL_GPL(pcibios_alloc_controller);

void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);

	/* Clear bit of phb_bitmap to allow reuse of this PHB number. */
	if (phb->global_number < MAX_PHBS)
		clear_bit(phb->global_number, phb_bitmap);

	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller);

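/*
 * Lifecycle sketch (hypothetical platform/hotplug code): controllers
 * allocated here are typically set up and torn down roughly like
 *
 *	phb = pcibios_alloc_controller(np);
 *	if (!phb)
 *		return -ENOMEM;
 *	pci_process_bridge_OF_ranges(phb, np, 0);
 *	pcibios_scan_phb(phb);
 *	...
 *	pcibios_free_controller(phb);
 *
 * pcibios_free_controller() returns the PHB number to phb_bitmap so a
 * later hot-add can reuse it.
 */
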
/*
 * This function returns the minimal alignment for memory or I/O
 * windows of the associated P2P bridge. By default, 4KiB alignment
 * for I/O windows and 1MiB for memory windows.
 */
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
					 unsigned long type)
{
	struct pci_controller *phb = pci_bus_to_host(bus);

	if (phb->controller_ops.window_alignment)
		return phb->controller_ops.window_alignment(bus, type);

	/*
	 * PCI core will figure out the default
	 * alignment: 4KiB for I/O and 1MiB for
	 * memory window.
	 */
	return 1;
}

void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (hose->controller_ops.setup_bridge)
		hose->controller_ops.setup_bridge(bus, type);
}

void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
	struct pci_controller *phb = pci_bus_to_host(dev->bus);

	if (phb->controller_ops.reset_secondary_bus) {
		phb->controller_ops.reset_secondary_bus(dev);
		return;
	}

	pci_reset_secondary_bus(dev);
}

#ifdef CONFIG_PCI_IOV
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
	if (ppc_md.pcibios_iov_resource_alignment)
		return ppc_md.pcibios_iov_resource_alignment(pdev, resno);

	return pci_iov_resource_size(pdev, resno);
}
#endif /* CONFIG_PCI_IOV */

static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}

int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}

unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);

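/*
 * Worked example for pci_address_to_pio() (made-up numbers): with
 * io_base_phys = 0x3fe08000000, io_base_virt = _IO_BASE + 0x10000 and
 * a lookup of physical address 0x3fe08000100, the translation is
 *
 *	base = io_base_virt - _IO_BASE      = 0x10000
 *	ret  = base + (addr - io_base_phys) = 0x10100
 *
 * i.e. a port number usable with inb()/outb() style accessors.
 */
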
/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);

/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

/*
 * Reads the interrupt pin to determine if an interrupt is used by the
 * card. If the interrupt is used, then gets the interrupt line from
 * Open Firmware and sets it in the pci_dev and pci_config line.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_phandle_args oirq;
	unsigned int virq;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get a mapping from the device-tree */
	if (of_irq_parse_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, let's fall back to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does it differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.args_count, oirq.args[0], oirq.args[1],
			 of_node_full_name(oirq.np));

		virq = irq_create_of_mapping(&oirq);
	}
	if (virq == NO_IRQ) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}

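/*
 * For reference (a made-up device-tree fragment): of_irq_parse_pci()
 * succeeds when the bridge node provides a standard interrupt map, e.g.
 *
 *	interrupt-map = <0x0 0x0 0x0 0x1 &mpic 0x0 0x1>;
 *	interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 *
 * Only firmware that lacks such a map falls into the config-space
 * PCI_INTERRUPT_LINE path above.
 */
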
/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for their device, which they
 * wish to mmap. They read the 32-bit value from the config space base
 * register, add whatever PAGE_SIZE multiple offset they wish, and feed this
 * into the offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == NULL)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}

/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{

	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource. This is a bit hackish, but we use
	 * this to work around the inability of /sysfs to provide a write
	 * combine bit
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	/* XXX would be nice to have a way to ask for write-through */
	if (write_combine)
		return pgprot_noncached_wc(protection);
	else
		return pgprot_noncached(protection);
}

/*
 * This one is used by /dev/mem and fbdev, which have no clue about the
 * PCI device; it tries to find the PCI device first and calls the
 * above routine.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}


/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture. The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}

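/*
 * Userspace view (illustrative only; path and length are made up): a
 * process would exercise this path roughly as
 *
 *	int fd = open("/proc/bus/pci/0000:00/02.0", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, bar_base);
 *
 * where bar_base is the page-aligned value read from the device's BAR,
 * as described in the comment above __pci_mmap_make_offset().
 */
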
/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* WARNING: The generic code is idiotic. It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity and always reads that
	 * as a u32, which means that we have to correct the location of
	 * the data read within those 32 bits for size 1 and 2
	 */
	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}

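/*
 * Worked example for the shifts above (illustrative): for a size-1
 * write of 0xAB, the generic caller hands us val = 0xAB000000 (the
 * byte sits in the top lane of the u32), so "val >> 24" recovers 0xAB
 * before it goes out via out_8(). Likewise a size-2 value arrives in
 * the upper 16 bits, hence "val >> 16".
 */
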
/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an error trying
		 * to mmap legacy_mem (instead of just moving on without legacy memory
		 * access) we fake it here by giving it anonymous memory, effectively
		 * behaving just like /dev/zero
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}

/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 *   - We can only cope with one IO space range and up to 3 Memory space
 *     ranges. However, some machines (thanks Apple !) tend to split their
 *     space into lots of small contiguous ranges. So we have to coalesce.
 *
 *   - Some busses have IO space not starting at 0, which causes trouble with
 *     the way we do our IO resource renumbering. The code somewhat deals with
 *     it for 64 bits but I would expect problems on 32 bits.
 *
 *   - Some 32 bits platforms such as 4xx can have physical space larger than
 *     32 bits so we need to use 64 bits values for the parsing
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
	       dev->full_name, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	/* Parse it */
	for_each_of_pci_range(&parser, &range) {
		/* If we failed translation or got a zero-sized region
		 * (some FW try to feed us with nonsensical zero sized regions
		 * such as power3 which look like some kind of attempt at exposing
		 * the VGA memory hole)
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			printk(KERN_INFO
			       "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						     range.size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;
			break;
		case IORESOURCE_MEM:
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr,
			       (range.pci_space & 0x40000000) ?
			       "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (range.pci_addr == 0) {
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* Build resource */
			hose->mem_offset[memno] = range.cpu_addr -
				range.pci_addr;
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}
}

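/*
 * For illustration, a made-up host bridge "ranges" property this parser
 * would accept (3-cell PCI address, 2-cell CPU address, 2-cell size):
 *
 *	ranges = <0x01000000 0x0 0x00000000  0x0 0xe8000000  0x0 0x00010000
 *		  0x02000000 0x0 0x80000000  0x0 0x80000000  0x0 0x40000000>;
 *
 * The first entry describes a 64KB IO window, the second a 1GB
 * non-prefetchable memory window mapped 1:1.
 */
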
/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
		return 0;
	if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
		return hose->global_number != 0;
	return 1;
}

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	if (ppc_md.pcibios_root_bridge_prepare)
		return ppc_md.pcibios_root_bridge_prepare(bridge);

	return 0;
}

/* This header fixup will do the resource fixup for all devices as they are
 * probed, but not for bridge ranges
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}

	if (dev->is_virtfn)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		struct pci_bus_region reg;
		if (!res->flags)
			continue;

		/* If we're going to re-assign everything, we mark all resources
		 * as unset (and 0-base them). In addition, we mark BARs starting
		 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
		 * since in that case, we don't want to re-assign anything
		 */
		pcibios_resource_to_bus(dev->bus, &reg, res);
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
			/* Only print message if not re-assigning */
			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
					 pci_name(dev), i, res);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);

/* This function tries to figure out if a bridge resource has been initialized
 * by the firmware or not. It doesn't have to be absolutely bulletproof, but
 * things go more smoothly when it gets it right. It should cover cases such
 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	struct pci_bus_region region;
	u16 command;
	int i;

	/* We don't do anything if PCI_PROBE_ONLY is set */
	if (pci_has_flag(PCI_PROBE_ONLY))
		return 0;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		pcibios_resource_to_bus(dev->bus, &region, res);

		/* If the BAR is non-0 then it's probably been initialized */
		if (region.start != 0)
			return 0;

		/* The BAR is 0, let's check if memory decoding is enabled on
		 * the bridge. If not, we consider it unassigned
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is enabled and the BAR is 0. If any of the
		 * bridge resources covers that starting address (0), then it's
		 * good enough for us for memory space
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->mem_offset[i])
				return 0;
		}

		/* Well, it starts at 0 and we know it will collide so we may as
		 * well consider it as unassigned. That covers the Apple case.
		 */
		return 1;
	} else {
		/* If the BAR is non-0, then we consider it assigned */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Here, we are a bit different than memory as typically IO
		 * space starting at low addresses -is- valid. What we do
		 * instead is that we consider as unassigned anything that
		 * doesn't have IO enabled in the PCI command register,
		 * and that's it.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* It's starting at 0 and IO is disabled in the bridge, consider
		 * it unassigned
		 */
		return 1;
	}
}

/* Fixup resources of a PCI<->PCI bridge */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags)
			continue;
		if (i >= 3 && bus->self->transparent)
			continue;

		/* If we're going to reassign everything, we can
		 * shrink the P2P resource to have size as being
		 * of 0 in order to save space.
		 */
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = -1;
			continue;
		}

		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);

		/* Try to detect uninitialized P2P bridge resources,
		 * and clear them out so they get re-assigned later
		 */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
		}
	}
}

void pcibios_setup_bus_self(struct pci_bus *bus)
{
	struct pci_controller *phb;

	/* Fix up the bus resources for P2P bridges */
	if (bus->self != NULL)
		pcibios_fixup_bridge(bus);

	/* Platform specific bus fixups. This is currently only used
	 * by fsl_pci and I'm hoping to get rid of it at some point
	 */
	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);

	/* Setup bus DMA mappings */
	phb = pci_bus_to_host(bus);
	if (phb->controller_ops.dma_bus_setup)
		phb->controller_ops.dma_bus_setup(bus);
}

static void pcibios_setup_device(struct pci_dev *dev)
{
	struct pci_controller *phb;
	/* Fixup NUMA node as it may not be setup yet by the generic
	 * code and is needed by the DMA init
	 */
	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

	/* Hook up default DMA ops */
	set_dma_ops(&dev->dev, pci_dma_ops);
	set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);

	/* Additional platform DMA/iommu setup */
	phb = pci_bus_to_host(dev->bus);
	if (phb->controller_ops.dma_dev_setup)
		phb->controller_ops.dma_dev_setup(dev);

	/* Read default IRQs and fixup if necessary */
	pci_read_irq_line(dev);
	if (ppc_md.pci_irq_fixup)
		ppc_md.pci_irq_fixup(dev);
}

int pcibios_add_device(struct pci_dev *dev)
{
	/*
	 * We can only call pcibios_setup_device() after bus setup is complete,
	 * since some of the platform specific DMA setup code depends on it.
	 */
	if (dev->bus->is_added)
		pcibios_setup_device(dev);

#ifdef CONFIG_PCI_IOV
	if (ppc_md.pcibios_fixup_sriov)
		ppc_md.pcibios_fixup_sriov(dev);
#endif /* CONFIG_PCI_IOV */

	return 0;
}

void pcibios_setup_bus_devices(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pr_debug("PCI: Fixup bus devices %d (%s)\n",
		 bus->number, bus->self ? pci_name(bus->self) : "PHB");

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Cardbus can call us to add new devices to a bus, so ignore
		 * those who are already fully discovered
		 */
		if (dev->is_added)
			continue;

		pcibios_setup_device(dev);
	}
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

void pcibios_fixup_bus(struct pci_bus *bus)
{
	/* When called from the generic PCI probe, read PCI<->PCI bridge
	 * bases. This is -not- called when generating the PCI tree from
	 * the OF device-tree.
	 */
	pci_read_bridge_bases(bus);

	/* Now fixup the bus itself */
	pcibios_setup_bus_self(bus);

	/* Now fixup devices on that bus */
	pcibios_setup_bus_devices(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);

void pci_fixup_cardbus(struct pci_bus *bus)
{
	/* Now fixup devices on that bus */
	pcibios_setup_bus_devices(bus);
}


static int skip_isa_ioresource_align(struct pci_dev *dev)
{
	if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
	    !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
		return 1;
	return 0;
}

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff..
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (skip_isa_ioresource_align(dev))
			return start;
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
EXPORT_SYMBOL(pcibios_align_resource);

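/*
 * Example of the rounding above (made-up request): an IO resource
 * starting at 0x2a10 has bits in 0x300 set, so it is rounded up:
 *
 *	(0x2a10 + 0x3ff) & ~0x3ff = 0x2c00
 *
 * which lands back in the safe 0x000-0x0ff region modulo 0x400.
 */
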
/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 */
static int reparent_resources(struct resource *parent,
			      struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s %pR under %s\n",
			 p->name, p, res->name);
	}
	return 0;
}

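/*
 * Illustration (made-up ranges): if parent already holds children
 * [0x1000-0x1fff] and [0x2000-0x2fff] and res is [0x1000-0x2fff],
 * both children conflict but are fully contained, so after the call
 * res sits under parent and the two old children hang off res. A res
 * of [0x1800-0x27ff] would instead return -1, since it only partially
 * covers each child.
 */
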
/*
 * Handle resources of PCI devices. If the world were perfect, we could
 * just allocate all the resource regions and do nothing more. It isn't.
 * On the other hand, we cannot just re-allocate all devices, as it would
 * require us to know lots of host bridge internals. So we attempt to
 * keep as much of the original configuration as possible, but tweak it
 * when it's found to be wrong.
 *
 * Known BIOS problems we have to work around:
 *	- I/O or memory regions not configured
 *	- regions configured, but not enabled in the command register
 *	- bogus I/O addresses above 64K used
 *	- expansion ROMs left enabled (this may sound harmless, but given
 *	  the fact the PCI specs explicitly allow address decoders to be
 *	  shared between expansion ROMs and other resource regions, it's
 *	  at least dangerous)
 *
 * Our solution:
 *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
 *	    This gives us fixed barriers on where we can allocate.
 *	(2) Allocate resources for all enabled devices. If there is
 *	    a collision, just mark the resource as unallocated. Also
 *	    disable expansion ROMs during this step.
 *	(3) Try to allocate resources for disabled devices. If the
 *	    resources were assigned correctly, everything goes well,
 *	    if they weren't, they won't disturb allocation of other
 *	    resources.
 *	(4) Assign new addresses to resources which were either
 *	    not configured at all or misconfigured. If explicitly
 *	    requested by the user, configure expansion ROM address
 *	    as well.
 */

static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags || res->start > res->end || res->parent)
			continue;

		/* If the resource was left unset at this point, we clear it */
		if (res->flags & IORESOURCE_UNSET)
			goto clear_resource;

		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* this happens when the generic PCI
				 * code (wrongly) decides that this
				 * bridge is transparent -- paulus
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
			 i, res, pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;

			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;

			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
						i + PCI_BRIDGE_RESOURCES) == 0)
				continue;
		}
		pr_warning("PCI: Cannot allocate resource region "
			   "%d of PCI bridge %d, will remap\n", i, bus->number);
	clear_resource:
		/* The resource might be figured out when doing
		 * reassignment based on the resources required
		 * by the downstream PCI devices. Here we set
		 * the size of the resource to be 0 in order to
		 * save more space.
		 */
		res->start = 0;
		res->end = -1;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}

static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
		 pci_name(dev), idx, r);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
		       " of device %s, will remap\n", idx, pci_name(dev));
		if (pr)
			pr_debug("PCI: parent is %p: %pR\n", pr, pr);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}

static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			/* We only allocate ROMs on pass 1 just in case they
			 * have been screwed up by firmware
			 */
			if (idx == PCI_ROM_RESOURCE)
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						       reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}

static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		offset = hose->mem_offset[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}

void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Allocate and assign resources */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);
	if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
		pcibios_allocate_resources(0);
		pcibios_allocate_resources(1);
	}

	/* Before we start assigning unassigned resources, we try to reserve
	 * the low IO area and the VGA memory area if they intersect the
	 * bus available resources to avoid allocating things on top of them
	 */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		list_for_each_entry(b, &pci_root_buses, node)
			pcibios_reserve_legacy_regions(b);
	}

	/* Now, if the platform didn't decide to blindly trust the firmware,
	 * we proceed to assigning things that were left unassigned
	 */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		pr_debug("PCI: Assigning unassigned resources...\n");
		pci_assign_unassigned_resources();
	}

	/* Call machine dependent fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();
}

/* This is used by the PCI hotplug driver to allocate resources
 * of newly plugged busses. We can try to consolidate with the
 * rest of the code later, for now, keep it as-is as our main
 * resource allocation function doesn't deal with sub-trees yet.
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
				 pci_name(dev), i, r);

			if (pci_claim_resource(dev, i) == 0)
				continue;

			pci_claim_bridge_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);


/* pcibios_finish_adding_to_bus
 *
 * This is to be called by the hotplug code after devices have been
 * added to a bus, this includes calling it for a PHB that is just
 * being added
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		if (bus->self)
			pci_assign_unassigned_bridge_resources(bus->self);
		else
			pci_assign_unassigned_bus_resources(bus);
	}

	/* Fixup EEH */
	eeh_add_device_tree_late(bus);

	/* Add new devices to global lists. Register in proc, sysfs. */
	pci_bus_add_devices(bus);

	/* sysfs files should only be added after devices are added */
	eeh_add_sysfs_files(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);

int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	struct pci_controller *phb = pci_bus_to_host(dev->bus);

	if (phb->controller_ops.enable_device_hook)
		if (!phb->controller_ops.enable_device_hook(dev))
			return -EINVAL;

	return pci_enable_resources(dev, mask);
}

void pcibios_disable_device(struct pci_dev *dev)
{
	struct pci_controller *phb = pci_bus_to_host(dev->bus);

	if (phb->controller_ops.disable_device)
		phb->controller_ops.disable_device(dev);
}

resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
{
	return (unsigned long) hose->io_base_virt - _IO_BASE;
}

static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	struct resource *res;
	resource_size_t offset;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	if (!res->flags) {
		pr_debug("PCI: I/O resource not set for host"
			 " bridge %s (domain %d)\n",
			 hose->dn->full_name, hose->global_number);
	} else {
		offset = pcibios_io_space_offset(hose);

		pr_debug("PCI: PHB IO resource = %pR off 0x%08llx\n",
			 res, (unsigned long long)offset);
		pci_add_resource_offset(resources, res, offset);
	}

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i == 0)
				printk(KERN_ERR "PCI: Memory resource 0 not set for "
				       "host bridge %s (domain %d)\n",
				       hose->dn->full_name, hose->global_number);
			continue;
		}
		offset = hose->mem_offset[i];

		pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
			 res, (unsigned long long)offset);

		pci_add_resource_offset(resources, res, offset);
	}
}

/*
 * Null PCI config access functions, for the case when we can't
 * find a hose.
 */
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}

static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops =
{
	.read = null_read_config,
	.write = null_write_config,
};

/*
 * These functions are used early on before PCI scanning is done
 * and all of the pci_dev and pci_bus structures have been created.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (hose == NULL) {
		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
	}
	bus.number = busnr;
	bus.sysdata = hose;
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}

#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

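/*
 * Usage sketch (hypothetical platform boot code): before the bus is
 * scanned, a platform can poke config space through the hose, e.g.
 *
 *	u16 vendor;
 *	early_read_config_word(hose, 0, PCI_DEVFN(0, 0),
 *			       PCI_VENDOR_ID, &vendor);
 *
 * fake_pci_bus() supplies the struct pci_bus the generic accessors
 * expect, so this works before any real pci_bus exists.
 */
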
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}

struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return of_node_get(hose->dn);
}

/**
 * pcibios_scan_phb - Given a pci_controller, setup and scan the PCI bus
 * @hose: Pointer to the PCI host controller instance structure
 */
void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;
	int mode;

	pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));

	/* Get some IO space for the new PHB */
	pcibios_setup_phb_io_space(hose);

	/* Wire up PHB bus resources */
	pcibios_setup_phb_resources(hose, &resources);

	hose->busn.start = hose->first_busno;
	hose->busn.end = hose->last_busno;
	hose->busn.flags = IORESOURCE_BUS;
	pci_add_resource(&resources, &hose->busn);

	/* Create an empty bus for the toplevel */
	bus = pci_create_root_bus(hose->parent, hose->first_busno,
				  hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		pci_free_resource_list(&resources);
		return;
	}
	hose->bus = bus;

	/* Get probe mode and perform scan */
	mode = PCI_PROBE_NORMAL;
	if (node && hose->controller_ops.probe_mode)
		mode = hose->controller_ops.probe_mode(bus);
	pr_debug("    probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);

	if (mode == PCI_PROBE_NORMAL) {
		pci_bus_update_busn_res_end(bus, 255);
		hose->last_busno = pci_scan_child_bus(bus);
		pci_bus_update_busn_res_end(bus, hose->last_busno);
	}

	/* Platform gets a chance to do some global fixups before
	 * we proceed to resource allocation
	 */
	if (ppc_md.pcibios_fixup_phb)
		ppc_md.pcibios_fixup_phb(hose);

	/* Configure PCI Express settings */
	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
		struct pci_bus *child;
		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
	}
}
EXPORT_SYMBOL_GPL(pcibios_scan_phb);

static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
	int i, class = dev->class >> 8;
	/* When configured as agent, programming interface = 1 */
	int prog_if = dev->class & 0xf;

	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
	     class == PCI_CLASS_BRIDGE_OTHER) &&
	    (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
	    (prog_if == 0) &&
	    (dev->bus->parent == NULL)) {
		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start = 0;
			dev->resource[i].end = 0;
			dev->resource[i].flags = 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);

static void fixup_vga(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
		vga_set_default_device(pdev);

}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			      PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);