PCI: Allocate 64-bit BARs above 4G when possible
/*
 *	drivers/pci/bus.c
 *
 * From setup-res.c, by:
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *	Ivan Kokshaysky (ink@jurassic.park.msu.ru)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/slab.h>

#include "pci.h"

void pci_add_resource_offset(struct list_head *resources, struct resource *res,
                             resource_size_t offset)
{
        struct pci_host_bridge_window *window;

        window = kzalloc(sizeof(struct pci_host_bridge_window), GFP_KERNEL);
        if (!window) {
                printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res);
                return;
        }

        window->res = res;
        window->offset = offset;
        list_add_tail(&window->list, resources);
}
EXPORT_SYMBOL(pci_add_resource_offset);

void pci_add_resource(struct list_head *resources, struct resource *res)
{
        pci_add_resource_offset(resources, res, 0);
}
EXPORT_SYMBOL(pci_add_resource);

void pci_free_resource_list(struct list_head *resources)
{
        struct pci_host_bridge_window *window, *tmp;

        list_for_each_entry_safe(window, tmp, resources, list) {
                list_del(&window->list);
                kfree(window);
        }
}
EXPORT_SYMBOL(pci_free_resource_list);
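
/*
 * Illustrative sketch, not part of the original file: how a host bridge
 * driver typically fills the resource list that the helpers above manage.
 * "example_io", "example_mem" and the 0x1000 offset are made-up values;
 * the offset is the CPU-to-bus translation (bus address = CPU address -
 * offset).  On error paths the list is torn down with
 * pci_free_resource_list(), which frees only the window entries, not the
 * struct resource objects themselves.
 */
static struct resource example_io = {
        .name   = "example PCI I/O",
        .start  = 0x1000,
        .end    = 0xffff,
        .flags  = IORESOURCE_IO,
};

static struct resource example_mem = {
        .name   = "example PCI MEM",
        .start  = 0x80000000,
        .end    = 0xbfffffff,
        .flags  = IORESOURCE_MEM,
};

static void example_fill_resources(struct list_head *resources)
{
        /* I/O window translated by the bridge */
        pci_add_resource_offset(resources, &example_io, 0x1000);

        /* memory window with a 1:1 CPU/bus mapping */
        pci_add_resource(resources, &example_mem);
}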

void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
                          unsigned int flags)
{
        struct pci_bus_resource *bus_res;

        bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
        if (!bus_res) {
                dev_err(&bus->dev, "can't add %pR resource\n", res);
                return;
        }

        bus_res->res = res;
        bus_res->flags = flags;
        list_add_tail(&bus_res->list, &bus->resources);
}

struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
{
        struct pci_bus_resource *bus_res;

        if (n < PCI_BRIDGE_RESOURCE_NUM)
                return bus->resource[n];

        n -= PCI_BRIDGE_RESOURCE_NUM;
        list_for_each_entry(bus_res, &bus->resources, list) {
                if (n-- == 0)
                        return bus_res->res;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);
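
/*
 * Illustrative sketch, not part of the original file: the
 * pci_bus_for_each_resource() helper in <linux/pci.h> is built on
 * pci_bus_resource_n() and walks both the fixed bus->resource[] slots and
 * the extra windows on bus->resources.  Unused slots show up as NULL.
 */
static void example_dump_bus_windows(struct pci_bus *bus)
{
        struct resource *r;
        int i;

        pci_bus_for_each_resource(bus, r, i) {
                if (!r)
                        continue;       /* unused bus->resource[] slot */
                dev_info(&bus->dev, "window %d: %pR\n", i, r);
        }
}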

void pci_bus_remove_resources(struct pci_bus *bus)
{
        int i;
        struct pci_bus_resource *bus_res, *tmp;

        for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
                bus->resource[i] = NULL;

        list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
                list_del(&bus_res->list);
                kfree(bus_res);
        }
}

static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
                                (dma_addr_t) 0xffffffffffffffffULL};
static struct pci_bus_region pci_high = {(dma_addr_t) 0x100000000ULL,
                                (dma_addr_t) 0xffffffffffffffffULL};
#endif

/*
 * @res contains CPU addresses.  Clip it so the corresponding bus addresses
 * on @bus are entirely within @region.  This is used to control the bus
 * addresses of resources we allocate, e.g., we may need a resource that
 * can be mapped by a 32-bit BAR.
 */
static void pci_clip_resource_to_region(struct pci_bus *bus,
                                        struct resource *res,
                                        struct pci_bus_region *region)
{
        struct pci_bus_region r;

        pcibios_resource_to_bus(bus, &r, res);
        if (r.start < region->start)
                r.start = region->start;
        if (r.end > region->end)
                r.end = region->end;

        if (r.end < r.start)
                res->end = res->start - 1;
        else
                pcibios_bus_to_resource(bus, res, &r);
}

static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
                resource_size_t size, resource_size_t align,
                resource_size_t min, unsigned int type_mask,
                resource_size_t (*alignf)(void *,
                                          const struct resource *,
                                          resource_size_t,
                                          resource_size_t),
                void *alignf_data,
                struct pci_bus_region *region)
{
        int i, ret;
        struct resource *r, avail;
        resource_size_t max;

        type_mask |= IORESOURCE_IO | IORESOURCE_MEM;

        pci_bus_for_each_resource(bus, r, i) {
                if (!r)
                        continue;

                /* type_mask must match */
                if ((res->flags ^ r->flags) & type_mask)
                        continue;

                /*
                 * We cannot allocate a non-prefetchable resource
                 * from a prefetchable area.
                 */
                if ((r->flags & IORESOURCE_PREFETCH) &&
                    !(res->flags & IORESOURCE_PREFETCH))
                        continue;

                avail = *r;
                pci_clip_resource_to_region(bus, &avail, region);
                if (!resource_size(&avail))
                        continue;

                /*
                 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
                 * protect badly documented motherboard resources, but if
                 * this is an already-configured bridge window, its start
                 * overrides "min".
                 */
                if (avail.start)
                        min = avail.start;

                max = avail.end;

                /* OK, try it out. */
                ret = allocate_resource(r, res, size, min, max,
                                        align, alignf, alignf_data);
                if (ret == 0)
                        return 0;
        }
        return -ENOMEM;
}

/**
 * pci_bus_alloc_resource - allocate a resource from a parent bus
 * @bus: PCI bus
 * @res: resource to allocate
 * @size: size of resource to allocate
 * @align: alignment of resource to allocate
 * @min: minimum /proc/iomem address to allocate
 * @type_mask: IORESOURCE_* type flags
 * @alignf: resource alignment function
 * @alignf_data: data argument for resource alignment function
 *
 * Given the PCI bus a device resides on, the size, minimum address,
 * alignment and type, try to find an acceptable resource allocation
 * for a specific device resource.
 */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
                resource_size_t size, resource_size_t align,
                resource_size_t min, unsigned int type_mask,
                resource_size_t (*alignf)(void *,
                                          const struct resource *,
                                          resource_size_t,
                                          resource_size_t),
                void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        int rc;

        if (res->flags & IORESOURCE_MEM_64) {
                rc = pci_bus_alloc_from_region(bus, res, size, align, min,
                                               type_mask, alignf, alignf_data,
                                               &pci_high);
                if (rc == 0)
                        return 0;

                return pci_bus_alloc_from_region(bus, res, size, align, min,
                                                 type_mask, alignf, alignf_data,
                                                 &pci_64_bit);
        }
#endif

        return pci_bus_alloc_from_region(bus, res, size, align, min,
                                         type_mask, alignf, alignf_data,
                                         &pci_32_bit);
}
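
/*
 * Illustrative sketch, not part of the original file: how a caller such as
 * the BAR assignment code in setup-res.c might place a 64-bit prefetchable
 * BAR.  Because res->flags (set up when the BAR was probed) contains
 * IORESOURCE_MEM_64, the allocation is tried in the above-4G region
 * (pci_high) first and then in the full 64-bit range (pci_64_bit);
 * resources without IORESOURCE_MEM_64 are limited to pci_32_bit.  The size
 * below is an example value; for a standard BAR the alignment equals the
 * size, and PCIBIOS_MIN_MEM/pcibios_align_resource are the arch-provided
 * minimum address and alignment hook.
 */
static int example_place_bar(struct pci_dev *dev, int resno)
{
        struct resource *res = &dev->resource[resno];
        resource_size_t size = 0x10000000;      /* example: 256 MB BAR */

        return pci_bus_alloc_resource(dev->bus, res, size, size,
                                      PCIBIOS_MIN_MEM,
                                      IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
                                      pcibios_align_resource, dev);
}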

void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }

/**
 * pci_bus_add_device - start driver for a single device
 * @dev: device to add
 *
 * This adds sysfs entries and then starts the device's driver.
 */
int pci_bus_add_device(struct pci_dev *dev)
{
        int retval;

        /*
         * Cannot be done in pci_device_add() yet because resources
         * are not assigned yet for some devices.
         */
        pci_fixup_device(pci_fixup_final, dev);
        pci_create_sysfs_dev_files(dev);

        dev->match_driver = true;
        retval = device_attach(&dev->dev);
        WARN_ON(retval < 0);

        dev->is_added = 1;

        return 0;
}

/**
 * pci_bus_add_devices - start drivers for PCI devices
 * @bus: bus to check for new devices
 *
 * Start drivers for PCI devices and add some sysfs entries.
 */
void pci_bus_add_devices(const struct pci_bus *bus)
{
        struct pci_dev *dev;
        struct pci_bus *child;
        int retval;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                /* Skip already-added devices */
                if (dev->is_added)
                        continue;
                retval = pci_bus_add_device(dev);
                if (retval)
                        dev_err(&dev->dev, "Error adding device (%d)\n",
                                retval);
        }

        list_for_each_entry(dev, &bus->devices, bus_list) {
                BUG_ON(!dev->is_added);
                child = dev->subordinate;
                if (child)
                        pci_bus_add_devices(child);
        }
}
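
/*
 * Illustrative sketch, not part of the original file: the usual tail of a
 * host bridge probe.  pci_bus_add_devices() runs only after resources have
 * been sized and assigned; "ops", "sysdata" and the pre-filled "resources"
 * list are placeholders for whatever the bridge driver provides.
 */
static void example_finish_root_bus(struct device *parent, struct pci_ops *ops,
                                    void *sysdata, struct list_head *resources)
{
        struct pci_bus *bus;

        bus = pci_scan_root_bus(parent, 0, ops, sysdata, resources);
        if (!bus)
                return;

        pci_bus_size_bridges(bus);
        pci_bus_assign_resources(bus);
        pci_bus_add_devices(bus);
}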

/**
 * pci_walk_bus - walk devices on/under bus, calling callback.
 * @top: bus whose devices should be walked
 * @cb: callback to be called for each device found
 * @userdata: arbitrary pointer to be passed to callback.
 *
 * Walk the given bus, including any bridged devices
 * on buses under this bus.  Call the provided callback
 * on each device found.
 *
 * We check the return of @cb each time. If it returns anything
 * other than 0, we break out.
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
                  void *userdata)
{
        struct pci_dev *dev;
        struct pci_bus *bus;
        struct list_head *next;
        int retval;

        bus = top;
        down_read(&pci_bus_sem);
        next = top->devices.next;
        for (;;) {
                if (next == &bus->devices) {
                        /* end of this bus, go up or finish */
                        if (bus == top)
                                break;
                        next = bus->self->bus_list.next;
                        bus = bus->self->bus;
                        continue;
                }
                dev = list_entry(next, struct pci_dev, bus_list);
                if (dev->subordinate) {
                        /* this is a pci-pci bridge, do its devices next */
                        next = dev->subordinate->devices.next;
                        bus = dev->subordinate;
                } else
                        next = dev->bus_list.next;

                retval = cb(dev, userdata);
                if (retval)
                        break;
        }
        up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
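
/*
 * Illustrative sketch, not part of the original file: counting every device
 * on and below a bus with pci_walk_bus().  A non-zero return from the
 * callback would stop the walk early.
 */
static int example_count_one(struct pci_dev *dev, void *data)
{
        int *count = data;

        (*count)++;
        return 0;       /* keep walking */
}

static int example_count_devices(struct pci_bus *bus)
{
        int count = 0;

        pci_walk_bus(bus, example_count_one, &count);
        return count;
}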

struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
        if (bus)
                get_device(&bus->dev);
        return bus;
}
EXPORT_SYMBOL(pci_bus_get);

void pci_bus_put(struct pci_bus *bus)
{
        if (bus)
                put_device(&bus->dev);
}
EXPORT_SYMBOL(pci_bus_put);

EXPORT_SYMBOL(pci_bus_alloc_resource);
EXPORT_SYMBOL_GPL(pci_bus_add_device);
EXPORT_SYMBOL(pci_bus_add_devices);