x86/PCI: convert to pci_create_root_bus() and pci_scan_root_bus()
arch/x86/pci/acpi.c
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>

struct pci_root_info {
	struct acpi_device *bridge;
	char *name;
	unsigned int res_num;
	struct resource *res;
	struct list_head *resources;
	int busnum;
};

static bool pci_use_crs = true;

static int __init set_use_crs(const struct dmi_system_id *id)
{
	pci_use_crs = true;
	return 0;
}

static int __init set_nouse_crs(const struct dmi_system_id *id)
{
	pci_use_crs = false;
	return 0;
}

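/*
 * DMI quirk table: machines whose BIOS predates 2008 but are known to
 * need the _CRS host bridge windows anyway (whitelist), followed by
 * machines whose _CRS is broken and must be ignored (blacklist).
 */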
static const struct dmi_system_id pci_use_crs_table[] __initconst = {
	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
	{
		.callback = set_use_crs,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASRock ALiveSATA2-GLAN",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASUS M2V-MX SE",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
		},
	},

	/* Now for the blacklist.. */

	/* https://bugzilla.redhat.com/show_bug.cgi?id=769657 */
	{
		.callback = set_nouse_crs,
		.ident = "Dell Studio 1557",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
			DMI_MATCH(DMI_BIOS_VERSION, "A09"),
		},
	},
	/* https://bugzilla.redhat.com/show_bug.cgi?id=769657 */
	{
		.callback = set_nouse_crs,
		.ident = "Thinkpad SL510",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_BOARD_NAME, "2847DFG"),
			DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
		},
	},
	{}
};

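/*
 * Decide whether the _CRS host bridge windows are trusted: default to
 * ignoring them on pre-2008 BIOSes, apply the DMI quirks above, and let
 * "pci=use_crs" / "pci=nocrs" on the command line override both.
 */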
void __init pci_acpi_crs_quirks(void)
{
	int year;

	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
		pci_use_crs = false;

	dmi_check_system(pci_use_crs_table);

	/*
	 * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
	 * takes precedence over anything we figured out above.
	 */
	if (pci_probe & PCI_ROOT_NO_CRS)
		pci_use_crs = false;
	else if (pci_probe & PCI_USE__CRS)
		pci_use_crs = true;

	printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
	       "if necessary, use \"pci=%s\" and report a bug\n",
	       pci_use_crs ? "Using" : "Ignoring",
	       pci_use_crs ? "nocrs" : "use_crs");
}

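/*
 * Normalize an ACPI resource descriptor (Memory24/32, FixedMemory32 or
 * Address16/32/64) into a struct acpi_resource_address64 so the callers
 * below can treat every window the same way.
 */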
static acpi_status
resource_to_addr(struct acpi_resource *resource,
		 struct acpi_resource_address64 *addr)
{
	acpi_status status;
	struct acpi_resource_memory24 *memory24;
	struct acpi_resource_memory32 *memory32;
	struct acpi_resource_fixed_memory32 *fixed_memory32;

	memset(addr, 0, sizeof(*addr));
	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_MEMORY24:
		memory24 = &resource->data.memory24;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory24->minimum;
		addr->address_length = memory24->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_MEMORY32:
		memory32 = &resource->data.memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory32->minimum;
		addr->address_length = memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
		fixed_memory32 = &resource->data.fixed_memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = fixed_memory32->address;
		addr->address_length = fixed_memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_ADDRESS16:
	case ACPI_RESOURCE_TYPE_ADDRESS32:
	case ACPI_RESOURCE_TYPE_ADDRESS64:
		status = acpi_resource_to_address64(resource, addr);
		if (ACPI_SUCCESS(status) &&
		    (addr->resource_type == ACPI_MEMORY_RANGE ||
		     addr->resource_type == ACPI_IO_RANGE) &&
		    addr->address_length > 0) {
			return AE_OK;
		}
		break;
	}
	return AE_ERROR;
}

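/*
 * _CRS is walked twice: count_resource() sizes the window array on the
 * first pass, setup_resource() fills it in on the second.
 */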
static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_addr(acpi_res, &addr);
	if (ACPI_SUCCESS(status))
		info->res_num++;
	return AE_OK;
}

static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *res;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags;
	u64 start, orig_end, end;

	status = resource_to_addr(acpi_res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
			flags |= IORESOURCE_PREFETCH;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
	} else
		return AE_OK;

	start = addr.minimum + addr.translation_offset;
	orig_end = end = addr.maximum + addr.translation_offset;

	/* Exclude non-addressable range or non-addressable portion of range */
	end = min(end, (u64)iomem_resource.end);
	if (end <= start) {
		dev_info(&info->bridge->dev,
			 "host bridge window [%#llx-%#llx] "
			 "(ignored, not CPU addressable)\n", start, orig_end);
		return AE_OK;
	} else if (orig_end != end) {
		dev_info(&info->bridge->dev,
			 "host bridge window [%#llx-%#llx] "
			 "([%#llx-%#llx] ignored, not CPU addressable)\n",
			 start, orig_end, end + 1, orig_end);
	}

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;

	if (!pci_use_crs) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	info->res_num++;
	if (addr.translation_offset)
		dev_info(&info->bridge->dev, "host bridge window %pR "
			 "(PCI address [%#llx-%#llx])\n",
			 res, res->start - addr.translation_offset,
			 res->end - addr.translation_offset);
	else
		dev_info(&info->bridge->dev, "host bridge window %pR\n", res);

	return AE_OK;
}

static bool resource_contains(struct resource *res, resource_size_t point)
{
	if (res->start <= point && point <= res->end)
		return true;
	return false;
}

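/*
 * Merge overlapping windows of the same type: res1 is expanded to cover
 * both ranges and res2 is dropped (its flags are cleared), because the
 * kernel resource tree does not allow overlapping siblings.
 */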
static void coalesce_windows(struct pci_root_info *info, unsigned long type)
{
	int i, j;
	struct resource *res1, *res2;

	for (i = 0; i < info->res_num; i++) {
		res1 = &info->res[i];
		if (!(res1->flags & type))
			continue;

		for (j = i + 1; j < info->res_num; j++) {
			res2 = &info->res[j];
			if (!(res2->flags & type))
				continue;

			/*
			 * I don't like throwing away windows because then
			 * our resources no longer match the ACPI _CRS, but
			 * the kernel resource tree doesn't allow overlaps.
			 */
			if (resource_contains(res1, res2->start) ||
			    resource_contains(res1, res2->end) ||
			    resource_contains(res2, res1->start) ||
			    resource_contains(res2, res1->end)) {
				res1->start = min(res1->start, res2->start);
				res1->end = max(res1->end, res2->end);
				dev_info(&info->bridge->dev,
					 "host bridge window expanded to %pR; %pR ignored\n",
					 res1, res2);
				res2->flags = 0;
			}
		}
	}
}

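/*
 * Insert the surviving windows into the iomem/ioport trees and hand
 * them to the PCI core as host bridge resources; windows that conflict
 * with something already in the tree are ignored.
 */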
static void add_resources(struct pci_root_info *info)
{
	int i;
	struct resource *res, *root, *conflict;

	if (!pci_use_crs)
		return;

	coalesce_windows(info, IORESOURCE_MEM);
	coalesce_windows(info, IORESOURCE_IO);

	for (i = 0; i < info->res_num; i++) {
		res = &info->res[i];

		if (res->flags & IORESOURCE_MEM)
			root = &iomem_resource;
		else if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			continue;

		conflict = insert_resource_conflict(root, res);
		if (conflict)
			dev_info(&info->bridge->dev,
				 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
				 res, conflict->name, conflict);
		else
			pci_add_resource(info->resources, res);
	}
}

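/*
 * Evaluate the bridge's _CRS, allocate the window array, and add the
 * usable windows to the caller's resource list.
 */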
static void
get_current_resources(struct acpi_device *device, int busnum,
		      int domain, struct list_head *resources)
{
	struct pci_root_info info;
	size_t size;

	info.bridge = device;
	info.res_num = 0;
	info.resources = resources;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
			    &info);
	if (!info.res_num)
		return;

	size = sizeof(*info.res) * info.res_num;
	info.res = kmalloc(size, GFP_KERNEL);
	if (!info.res)
		return;

	info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
	if (!info.name)
		goto name_alloc_fail;

	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
			    &info);

	add_resources(&info);
	return;

name_alloc_fail:
	kfree(info.res);
}

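/*
 * Entry point from the ACPI PCI root driver: set up per-root sysdata
 * (domain, NUMA node), gather the _CRS windows (or fall back to the
 * default root bus resources), then create and scan the root bus.
 */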
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int busnum = root->secondary.start;
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	node = -1;
#ifdef CONFIG_ACPI_NUMA
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd->domain = domain;
	sd->node = node;
	/*
	 * The desired PCI bus may already have been scanned; in that case
	 * there is no need to rescan it for the given domain/busnum.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus exists, the content of bus->sysdata will
		 * be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(sd);
	} else {
		get_current_resources(device, busnum, domain, &resources);
		if (list_empty(&resources))
			x86_pci_root_bus_resources(busnum, &resources);
		bus = pci_create_root_bus(NULL, busnum, &pci_root_ops, sd,
					  &resources);
		if (bus)
			bus->subordinate = pci_scan_child_bus(bus);
		else
			pci_free_resource_list(&resources);
	}

416
417 /* After the PCI-E bus has been walked and all devices discovered,
418 * configure any settings of the fabric that might be necessary.
419 */
420 if (bus) {
421 struct pci_bus *child;
422 list_for_each_entry(child, &bus->children, node) {
423 struct pci_dev *self = child->self;
424 if (!self)
425 continue;
426
427 pcie_bus_configure_settings(child, self->pcie_mpss);
428 }
429 }
430
431 if (!bus)
432 kfree(sd);
433
434 if (bus && node != -1) {
435 #ifdef CONFIG_ACPI_NUMA
436 if (pxm >= 0)
437 dev_printk(KERN_DEBUG, &bus->dev,
438 "on NUMA node %d (pxm %d)\n", node, pxm);
439 #else
440 dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
441 #endif
442 }
443
444 return bus;
445 }
446
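/*
 * Switch x86 PCI IRQ routing over to ACPI: install the ACPI
 * enable/disable hooks and, if "pci=routeirq" was given, route every
 * device's interrupt up front.
 */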
int __init pci_acpi_init(void)
{
	struct pci_dev *dev = NULL;

	if (acpi_noirq)
		return -ENODEV;

	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
	acpi_irq_penalty_init();
	pcibios_enable_irq = acpi_pci_irq_enable;
	pcibios_disable_irq = acpi_pci_irq_disable;
	x86_init.pci.init_irq = x86_init_noop;

	if (pci_routeirq) {
		/*
		 * PCI IRQ routing is set up by pci_enable_device(), but we
		 * also do it here in case there are still broken drivers that
		 * don't use pci_enable_device().
		 */
		printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
		for_each_pci_dev(dev)
			acpi_pci_irq_enable(dev);
	}

	return 0;
}