Merge tag 'pci-v4.8-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 0f7a60f1e9f6292ceb9f1ade5c67d2617d1aefb9..a5c0153ede37f21d6dd8f47beac764587789fbc5 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
 #include <asm/ppc-pci.h>
 #include <asm/eeh.h>
 
+/* hose_spinlock protects accesses to the phb_bitmap. */
 static DEFINE_SPINLOCK(hose_spinlock);
 LIST_HEAD(hose_list);
 
-/* XXX kill that some day ... */
-static int global_phb_number;          /* Global phb counter */
+/* For dynamic PHB numbering in get_phb_number(): max number of PHBs. */
+#define MAX_PHBS 0x10000
+
+/*
+ * For dynamic PHB numbering: used/free PHBs tracking bitmap.
+ * Accesses to this bitmap should be protected by hose_spinlock.
+ */
+static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);
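
For scale, DECLARE_BITMAP(phb_bitmap, MAX_PHBS) boils down to an unsigned long
array large enough to hold 0x10000 bits, roughly:

	/* Rough expansion of the declaration above (BITS_TO_LONGS rounds up): */
	unsigned long phb_bitmap[BITS_TO_LONGS(0x10000)];	/* 1024 longs = 8 KiB on 64-bit */
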
 
 /* ISA Memory physical address */
 resource_size_t isa_mem_base;
@@ -64,6 +71,42 @@ struct dma_map_ops *get_pci_dma_ops(void)
 }
 EXPORT_SYMBOL(get_pci_dma_ops);
 
+/*
+ * This function must be called while holding hose_spinlock, which
+ * protects phb_bitmap.
+ */
+static int get_phb_number(struct device_node *dn)
+{
+       int ret, phb_id = -1;
+       u64 prop;
+
+       /*
+        * Try fixed PHB numbering first, from arch-specific device-tree
+        * properties: "ibm,opal-phbid" on powernv (OPAL environments) or,
+        * failing that, the second cell of "reg" (used on pseries).
+        */
+       ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
+       if (ret) {
+               u32 prop_32;
+
+               /*
+                * "reg" holds u32 cells; read into a u32 here to avoid an
+                * endianness bug from writing through (u32 *)&prop.
+                */
+               ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
+               prop = prop_32;
+       }
+
+       if (!ret)
+               phb_id = (int)(prop & (MAX_PHBS - 1));
+
+       /* We need to be sure not to use the same PHB number twice. */
+       if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
+               return phb_id;
+
+       /*
+        * If neither pseries nor powernv, or if fixed PHB numbering picked a
+        * number that is already in use, fall back to dynamic PHB numbering.
+        */
+       phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
+       BUG_ON(phb_id >= MAX_PHBS);
+       set_bit(phb_id, phb_bitmap);
+
+       return phb_id;
+}
+
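
To make the allocation policy above concrete: the fixed number is simply the
device-tree value masked into range (a property value of 0x20000001, say,
masks to PHB 1), and it is only used if its bit in phb_bitmap is still clear;
otherwise the first free bit wins. A standalone userspace sketch of the same
policy, with a hypothetical fixed_id argument standing in for the device-tree
read (plain C, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_PHBS 0x10000

	static bool phb_used[MAX_PHBS];

	static int get_phb_number_sketch(long long fixed_id)
	{
		int phb_id;

		/* Fixed numbering: mask into range, take it if still free. */
		if (fixed_id >= 0) {
			phb_id = (int)(fixed_id & (MAX_PHBS - 1));
			if (!phb_used[phb_id]) {
				phb_used[phb_id] = true;
				return phb_id;
			}
		}

		/* Dynamic fallback, like find_first_zero_bit() + set_bit(). */
		for (phb_id = 0; phb_id < MAX_PHBS; phb_id++) {
			if (!phb_used[phb_id]) {
				phb_used[phb_id] = true;
				return phb_id;
			}
		}
		return -1;	/* all 65536 numbers in use */
	}

	int main(void)
	{
		printf("%d\n", get_phb_number_sketch(0x10));	/* fixed: 16 */
		printf("%d\n", get_phb_number_sketch(0x10));	/* collision: dynamic, 0 */
		printf("%d\n", get_phb_number_sketch(-1));	/* no property: dynamic, 1 */
		return 0;
	}

Note that pcibios_free_controller() below clears the bit again, so a PHB
number released by hot-unplug can be handed back out to the next controller.
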
 struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
 {
        struct pci_controller *phb;
@@ -72,7 +115,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
        if (phb == NULL)
                return NULL;
        spin_lock(&hose_spinlock);
-       phb->global_number = global_phb_number++;
+       phb->global_number = get_phb_number(dev);
        list_add_tail(&phb->list_node, &hose_list);
        spin_unlock(&hose_spinlock);
        phb->dn = dev;
@@ -94,6 +137,11 @@ EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
 void pcibios_free_controller(struct pci_controller *phb)
 {
        spin_lock(&hose_spinlock);
+
+       /* Clear bit of phb_bitmap to allow reuse of this PHB number. */
+       if (phb->global_number < MAX_PHBS)
+               clear_bit(phb->global_number, phb_bitmap);
+
        list_del(&phb->list_node);
        spin_unlock(&hose_spinlock);
 
@@ -124,6 +172,14 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
        return 1;
 }
 
+void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
+{
+       struct pci_controller *hose = pci_bus_to_host(bus);
+
+       if (hose->controller_ops.setup_bridge)
+               hose->controller_ops.setup_bridge(bus, type);
+}
+
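
pcibios_setup_bridge() above is the powerpc side of a new weak hook that the
PCI core calls when it programs a bridge's windows; here it simply forwards to
the PHB's controller_ops.setup_bridge callback when one is registered
(powernv, for instance, registers a callback here elsewhere in this series to
keep its 64-bit MMIO windows in sync). A kernel-context sketch of how a
platform might wire it up (the callback name and body are illustrative, not
part of this patch):

	/* Illustrative platform callback for the new hook. */
	static void example_setup_bridge(struct pci_bus *bus, unsigned long type)
	{
		/* e.g. reprogram this PHB's MMIO windows for the bridge */
		pr_debug("setup_bridge: %04x:%02x type %#lx\n",
			 pci_domain_nr(bus), bus->number, type);
	}

	/* ...and during PHB setup (hose is this PHB's struct pci_controller): */
	hose->controller_ops.setup_bridge = example_setup_bridge;
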
 void pcibios_reset_secondary_bus(struct pci_dev *dev)
 {
        struct pci_controller *phb = pci_bus_to_host(dev->bus);
@@ -355,36 +411,6 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
        return NULL;
 }
 
-/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
-                                     pgprot_t protection,
-                                     enum pci_mmap_state mmap_state,
-                                     int write_combine)
-{
-
-       /* Write combine is always 0 on non-memory space mappings. On
-        * memory space, if the user didn't pass 1, we check for a
-        * "prefetchable" resource. This is a bit hackish, but we use
-        * this to workaround the inability of /sysfs to provide a write
-        * combine bit
-        */
-       if (mmap_state != pci_mmap_mem)
-               write_combine = 0;
-       else if (write_combine == 0) {
-               if (rp->flags & IORESOURCE_PREFETCH)
-                       write_combine = 1;
-       }
-
-       /* XXX would be nice to have a way to ask for write-through */
-       if (write_combine)
-               return pgprot_noncached_wc(protection);
-       else
-               return pgprot_noncached(protection);
-}
-
 /*
  * This one is used by /dev/mem and fbdev who have no clue about the
  * PCI device, it tries to find the PCI device first and calls the
@@ -458,9 +484,10 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                return -EINVAL;
 
        vma->vm_pgoff = offset >> PAGE_SHIFT;
-       vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
-                                                 vma->vm_page_prot,
-                                                 mmap_state, write_combine);
+       if (write_combine)
+               vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
+       else
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
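
One behavioural note on the hunk above: the removed __pci_mmap_set_pgprot()
used to force write-combining for any prefetchable MMIO BAR even when the
caller did not ask for it; the mapping is now write-combined only when the
caller passes write_combine, which for sysfs means mapping resourceN_wc rather
than resourceN. A userspace sketch (the device path and BAR are hypothetical,
and the BAR must be prefetchable for the _wc file to exist):

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/bus/pci/devices/0000:00:00.0/resource0_wc", O_RDWR);
		void *bar;

		if (fd < 0)
			return 1;
		/* Map the first 4 KiB of BAR 0 with write-combining. */
		bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (bar == MAP_FAILED) {
			close(fd);
			return 1;
		}
		munmap(bar, 4096);
		close(fd);
		return 0;
	}
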
@@ -610,39 +637,25 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
                          const struct resource *rsrc,
                          resource_size_t *start, resource_size_t *end)
 {
-       struct pci_controller *hose = pci_bus_to_host(dev->bus);
-       resource_size_t offset = 0;
+       struct pci_bus_region region;
 
-       if (hose == NULL)
+       if (rsrc->flags & IORESOURCE_IO) {
+               pcibios_resource_to_bus(dev->bus, &region,
+                                       (struct resource *) rsrc);
+               *start = region.start;
+               *end = region.end;
                return;
+       }
 
-       if (rsrc->flags & IORESOURCE_IO)
-               offset = (unsigned long)hose->io_base_virt - _IO_BASE;
-
-       /* We pass a fully fixed up address to userland for MMIO instead of
-        * a BAR value because X is lame and expects to be able to use that
-        * to pass to /dev/mem !
-        *
-        * That means that we'll have potentially 64 bits values where some
-        * userland apps only expect 32 (like X itself since it thinks only
-        * Sparc has 64 bits MMIO) but if we don't do that, we break it on
-        * 32 bits CHRPs :-(
+       /* We pass a CPU physical address to userland for MMIO instead of a
+        * BAR value because X is lame and expects to be able to pass that
+        * straight to /dev/mem!
         *
-        * Hopefully, the sysfs insterface is immune to that gunk. Once X
-        * has been fixed (and the fix spread enough), we can re-enable the
-        * 2 lines below and pass down a BAR value to userland. In that case
-        * we'll also have to re-enable the matching code in
-        * __pci_mmap_make_offset().
-        *
-        * BenH.
+        * That means we may have 64-bit values where some apps only expect
+        * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
         */
-#if 0
-       else if (rsrc->flags & IORESOURCE_MEM)
-               offset = hose->pci_mem_offset;
-#endif
-
-       *start = rsrc->start - offset;
-       *end = rsrc->end - offset;
+       *start = rsrc->start;
+       *end = rsrc->end;
 }
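
pci_resource_to_user() backs, among other things, the per-device "resource"
file in sysfs, so after this change userland sees raw CPU physical addresses
for MMIO BARs and bus port numbers for I/O BARs. A userspace sketch that reads
the first BAR range (the device address is hypothetical):

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/bus/pci/devices/0000:00:00.0/resource", "r");
		uint64_t start, end, flags;

		if (!f)
			return 1;
		/* One "start end flags" line per BAR; the first line is BAR 0. */
		if (fscanf(f, "%" SCNx64 " %" SCNx64 " %" SCNx64,
			   &start, &end, &flags) == 3)
			printf("BAR0: %#" PRIx64 "-%#" PRIx64 " (flags %#" PRIx64 ")\n",
			       start, end, flags);
		fclose(f);
		return 0;
	}
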
 
 /**
@@ -1362,8 +1375,10 @@ void __init pcibios_resource_survey(void)
        /* Allocate and assign resources */
        list_for_each_entry(b, &pci_root_buses, node)
                pcibios_allocate_bus_resources(b);
-       pcibios_allocate_resources(0);
-       pcibios_allocate_resources(1);
+       if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
+               pcibios_allocate_resources(0);
+               pcibios_allocate_resources(1);
+       }
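
The new PCI_REASSIGN_ALL_RSRC check means platforms that want Linux to assign
all resources from scratch no longer claim firmware-assigned BARs first. The
flag is set early in platform bring-up, before the buses are scanned; a
kernel-context sketch (the function name is illustrative):

	#include <linux/init.h>
	#include <linux/pci.h>

	static void __init example_pcibios_init(void)
	{
		/* Ignore firmware BAR assignments; Linux reassigns everything. */
		pci_add_flags(PCI_REASSIGN_ALL_RSRC);
	}
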
 
        /* Before we start assigning unassigned resource, we try to reserve
         * the low IO area and the VGA memory area if they intersect the
@@ -1436,8 +1451,12 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
        /* Allocate bus and devices resources */
        pcibios_allocate_bus_resources(bus);
        pcibios_claim_one_bus(bus);
-       if (!pci_has_flag(PCI_PROBE_ONLY))
-               pci_assign_unassigned_bus_resources(bus);
+       if (!pci_has_flag(PCI_PROBE_ONLY)) {
+               if (bus->self)
+                       pci_assign_unassigned_bridge_resources(bus->self);
+               else
+                       pci_assign_unassigned_bus_resources(bus);
+       }
 
        /* Fixup EEH */
        eeh_add_device_tree_late(bus);
@@ -1485,9 +1504,9 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
        res = &hose->io_resource;
 
        if (!res->flags) {
-               pr_info("PCI: I/O resource not set for host"
-                      " bridge %s (domain %d)\n",
-                      hose->dn->full_name, hose->global_number);
+               pr_debug("PCI: I/O resource not set for host"
+                        " bridge %s (domain %d)\n",
+                        hose->dn->full_name, hose->global_number);
        } else {
                offset = pcibios_io_space_offset(hose);
 