/*
 * Contains common pci routines for ALL ppc platforms
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *    Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
static DEFINE_SPINLOCK(hose_spinlock);
/* XXX kill that some day ... */
static int global_phb_number;		/* Global phb counter */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;

	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);

	phb->arch_data = dev;
	phb->is_dynamic = mem_init_done;

	if (dev) {
		int nid = of_node_to_nid(dev);

		/* Fall back to "no node" if the node is invalid or offline */
		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
	return phb;
}
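/*
 * Illustration only (not part of the original file): a minimal sketch of
 * how platform setup code typically uses the allocator above.  The
 * function name and the steps in the trailing comment are assumptions;
 * real platforms fill in ops and windows from the device-tree.
 */
#if 0
static void __init example_setup_phb(struct device_node *np)
{
	struct pci_controller *hose;

	hose = pcibios_alloc_controller(np);
	if (hose == NULL)
		return;

	/* A platform would now set hose->ops, hose->first_busno and
	 * hose->last_busno, and map the I/O and memory windows described
	 * by the "ranges" property of np.
	 */
}
#endif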
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	/* Only dynamically allocated controllers can be freed */
	if (phb->is_dynamic)
		kfree(phb);
}
int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	unsigned long size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
#ifdef CONFIG_PPC64
		size = hose->pci_io_size;
#else
		size = hose->io_resource.end - hose->io_resource.start + 1;
#endif
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}
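/*
 * Illustration only: pcibios_vaddr_is_ioport() lets unmap paths tell
 * I/O-port cookies (which point into a hose's I/O window) apart from
 * real ioremap()ed MMIO mappings.  A sketch of the kind of caller,
 * modelled loosely on pci_iounmap(); simplified, not the authoritative
 * implementation:
 */
#if 0
static void example_pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	/* Only genuine MMIO mappings need iounmap() */
	if (!pcibios_vaddr_is_ioport(addr))
		iounmap(addr);
}
#endif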
/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	/* iSeries has a single PCI domain */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return 0;
	else {
		struct pci_controller *hose = pci_bus_to_host(bus);

		return hose->global_number;
	}
}
EXPORT_SYMBOL(pci_domain_nr);
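/*
 * Illustration only: the domain number is what userland sees as the first
 * field of an lspci-style address.  A hypothetical sketch (the function
 * name is made up):
 */
#if 0
static void example_print_dev_address(struct pci_dev *dev)
{
	/* e.g. "0001:02:03.0" on a multi-PHB machine */
	printk("%04x:%02x:%02x.%d\n", pci_domain_nr(dev->bus),
	       dev->bus->number, PCI_SLOT(dev->devfn),
	       PCI_FUNC(dev->devfn));
}
#endif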
/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;

		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->arch_data == node)
				return hose;
		/* Not a host bridge node itself; try the parent bus node */
		node = node->parent;
	}
	return NULL;
}
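/*
 * Illustration only: a sketch of the early-boot config access described
 * above, against the hose found for an OF device node.  The helper
 * early_read_config_dword() is assumed here as the early accessor;
 * check what your platform actually provides.
 */
#if 0
static void __init example_early_config_read(struct device_node *np)
{
	struct pci_controller *hose;
	u32 id;

	hose = pci_find_hose_for_OF_device(np);
	if (hose == NULL)
		return;

	/* Vendor/device ID of devfn 0 on the hose's root bus */
	early_read_config_dword(hose, hose->first_busno, 0,
				PCI_VENDOR_ID, &id);
}
#endif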
#ifdef CONFIG_PPC_OF
static ssize_t pci_show_devspec(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev(dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
#endif /* CONFIG_PPC_OF */
/* Add sysfs properties */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_OF
	return device_create_file(&pdev->dev, &dev_attr_devspec);
#else
	return 0;
#endif /* CONFIG_PPC_OF */
}
char __devinit *pcibios_setup(char *str)
{
	return str;
}
/*
 * Reads the interrupt pin to determine if the interrupt is used by the card.
 * If the interrupt is used, then gets the interrupt line from
 * Open Firmware and sets it in the pci_dev and PCI config line.
 */
int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	DBG("Try to map irq for %s...\n", pci_name(pci_dev));

	memset(&oirq, 0xff, sizeof(oirq));

	/* Try to get a mapping from the device-tree */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, lets fallback to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff)
			return -1;
		DBG(" -> no map ! Using irq line %d from PCI config\n", line);

		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
		    oirq.size, oirq.specifier[0], oirq.specifier[1],
		    oirq.controller->full_name);

		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if (virq == NO_IRQ) {
		DBG(" -> failed to map !\n");
		return -1;
	}

	DBG(" -> mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}
EXPORT_SYMBOL(pci_read_irq_line);
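/*
 * Illustration only: pci_read_irq_line() is normally run for every device
 * from an arch fixup path, so that drivers can simply use dev->irq
 * afterwards.  A minimal sketch of such a caller (the function name is
 * made up):
 */
#if 0
static void __devinit example_fixup_irqs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list)
		pci_read_irq_line(dev);
}
#endif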
/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 */
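/*
 * Illustration only (not from the original file): what the scheme described
 * below looks like from userland.  The path and the "bar" value are made-up
 * placeholders, and error handling is elided.
 *
 *	int fd = open("/proc/bus/pci/00/0c.0", O_RDWR);
 *	// bar = 32-bit base address read from the device's config space
 *	off_t off = (off_t)(bar & ~(getpagesize() - 1)) + 3 * getpagesize();
 *	void *p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, off);
 */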
/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for the device they wish
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns the matching resource on success, NULL on failure.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == NULL)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	unsigned long prot = pgprot_val(protection);

	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource. This is a bit hackish, but we use
	 * this to workaround the inability of /sysfs to provide a write
	 * combine flag.
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	/* XXX would be nice to have a way to ask for write-through */
	prot |= _PAGE_NO_CACHE;
	if (write_combine)
		prot &= ~_PAGE_GUARDED;
	else
		prot |= _PAGE_GUARDED;

	return __pgprot(prot);
}
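/*
 * For reference, the protection bits the logic above produces (an informal
 * summary added here, not from the original file):
 *
 *	I/O space:			_PAGE_NO_CACHE | _PAGE_GUARDED
 *	MMIO, not write-combined:	_PAGE_NO_CACHE | _PAGE_GUARDED
 *	MMIO, write-combined or
 *	prefetchable resource:		_PAGE_NO_CACHE only
 */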
/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	unsigned long offset = pfn << PAGE_SHIFT;
	int i;

	/* Plain RAM keeps its cacheable protection */
	if (page_is_ram(pfn))
		return __pgprot(prot);

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;

			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;

			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}
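/*
 * Illustration only: platforms expose the routine above through the
 * machdep vector so that generic /dev/mem and fbdev mapping code picks it
 * up.  A sketch, assuming the usual ppc_md hook (the setup function name
 * is made up):
 */
#if 0
static void __init example_setup_arch(void)
{
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
}
#endif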
/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}
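/*
 * Worked example (made-up numbers, added for illustration): with
 * PAGE_SHIFT == 12, a user who mmap()s at offset 0x80002000 arrives here
 * with vm_pgoff == 0x80002.  __pci_mmap_make_offset() checks 0x80002000
 * against the device's resources (adding the hose translation for I/O
 * space), and on success vm_pgoff becomes the physical page number that
 * is handed to remap_pfn_range().
 */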
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}
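/*
 * Worked example (made-up numbers, added for illustration): an I/O
 * resource whose kernel port cookie is 0x10800, on a hose where
 * io_base_virt - _IO_BASE == 0x10000, is reported to userland as 0x800,
 * i.e. the bus-level BAR value.  MMIO resources are reported untranslated
 * for the /dev/mem reasons explained above.
 */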