/*
 * Contains common pci routines for ALL ppc platforms
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* XXX kill that some day ... */
static int global_phb_number;		/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	phb->is_dynamic = mem_init_done;
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	unsigned long size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
#ifdef CONFIG_PPC64
		size = hose->pci_io_size;
#else
		size = hose->io_resource.end - hose->io_resource.start + 1;
#endif
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}
/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);
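/*
 * Illustrative note: the value returned here is the "domain" part of the
 * familiar dddd:bb:dd.f device names, so a hypothetical caller could do:
 *
 *	pr_debug("PCI: %s is in domain %04x\n",
 *		 pci_name(pdev), pci_domain_nr(pdev->bus));
 */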
#ifdef CONFIG_PPC_OF

/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;

		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}
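/*
 * Usage sketch (illustrative only): early boot code that needs a config
 * cycle before bus numbers are assigned might look up the hose for a
 * device node and use the early config accessors; the surrounding code
 * here is assumed, not taken from this file.
 *
 *	struct pci_controller *hose;
 *	u16 vendor;
 *
 *	hose = pci_find_hose_for_OF_device(node);
 *	if (hose)
 *		early_read_config_word(hose, hose->first_busno, 0,
 *				       PCI_VENDOR_ID, &vendor);
 */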
static ssize_t pci_show_devspec(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev(dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
#endif /* CONFIG_PPC_OF */
/* Add sysfs properties */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_OF
	return device_create_file(&pdev->dev, &dev_attr_devspec);
#else
	return 0;
#endif /* CONFIG_PPC_OF */
}
char __devinit *pcibios_setup(char *str)
{
	return str;
}
/*
 * Reads the interrupt pin to determine if the interrupt is used by the card.
 * If the interrupt is used, then gets the interrupt line from
 * Open Firmware and sets it in the pci_dev and PCI config space.
 */
int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	DBG("Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get a mapping from the device-tree */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, lets fallback to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff) {
			return -1;
		}
		DBG(" -> no map ! Using irq line %d from PCI config\n", line);

		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
		    oirq.size, oirq.specifier[0], oirq.specifier[1],
		    oirq.controller->full_name);

		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if (virq == NO_IRQ) {
		DBG(" -> failed to map !\n");
		return -1;
	}

	DBG(" -> mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}
EXPORT_SYMBOL(pci_read_irq_line);
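/*
 * Usage sketch (illustrative only): platforms usually resolve IRQs for
 * every device from a fixup pass; the helper name below is hypothetical,
 * only pci_read_irq_line() is real.
 *
 *	static void __init example_fixup_irqs(void)
 *	{
 *		struct pci_dev *dev = NULL;
 *
 *		for_each_pci_dev(dev)
 *			pci_read_irq_line(dev);
 *	}
 */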
/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for the device which they wish
 * to mmap. They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns the matching resource on success, NULL on failure.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == NULL)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
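/*
 * Userland view of the scheme handled above (illustrative only, not
 * kernel code): the path, BAR value and length are made up, and the
 * PCIIOC_MMAP_IS_MEM ioctl selects memory space before the mmap().
 *
 *	int fd = open("/proc/bus/pci/00/0c.0", O_RDWR);
 *	ioctl(fd, PCIIOC_MMAP_IS_MEM);
 *	void *regs = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, bar0 + 2 * PAGE_SIZE);
 */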
/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	unsigned long prot = pgprot_val(protection);

	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource. This is a bit hackish, but we use
	 * this to workaround the inability of /sysfs to provide a write
	 * combine bit
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	/* XXX would be nice to have a way to ask for write-through */
	prot |= _PAGE_NO_CACHE;
	if (write_combine)
		prot &= ~_PAGE_GUARDED;
	else
		prot |= _PAGE_GUARDED;

	return __pgprot(prot);
}
/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	unsigned long offset = pfn << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return __pgprot(prot);

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}
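/*
 * Illustrative only: platforms opt in by pointing the machdep hook at
 * this routine from their setup code, along the lines of:
 *
 *	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
 */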
/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture. The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset = ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}