Linux kernel source file: drivers/pci/probe.c — PCI detection and setup code.
1 /*
2 * probe.c - PCI detection and setup code
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15
16 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR 3
18
/* Default bus-number aperture (buses 0-255) used for the root domain. */
struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};
25
/* Ugh. Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

/* One entry per PCI domain, allocated lazily; see get_pci_domain_busn_res(). */
static LIST_HEAD(pci_domain_busn_res_list);

/* Per-domain bus-number resource, linked on pci_domain_busn_res_list. */
struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};
37
/*
 * Return the bus-number resource for @domain_nr, allocating a fresh
 * 0-0xff aperture on first use.  Entries live forever (never freed).
 * Returns NULL on allocation failure.
 */
static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	/* Reuse an existing entry for this domain if we already have one. */
	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}
59
/* bus_find_device() match callback that accepts every device. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
64
/*
 * Some device drivers need to know whether PCI has been initialized.
 * Basically, we consider PCI uninitialized when there is no device
 * to be found on the pci_bus_type bus.
 *
 * Returns 1 when no PCI device exists, 0 otherwise.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	/* bus_find_device() took a reference; drop it (put_device(NULL) is a no-op). */
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);
81
82 /*
83 * PCI Bus Class
84 */
/*
 * PCI Bus Class
 */
/* Final release for a struct pci_bus device: drop the bridge reference,
 * detach resources and OF node, then free the bus itself. */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}
95
/* Device class backing /sys/class/pci_bus. */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

/* Register the class early (postcore) so buses can be added at boot. */
static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
107
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 u64 size = mask & maxbase; /* Find the significant bits */
111 if (!size)
112 return 0;
113
114 /* Get the lowest of them to find the decode size, and
115 from that the extent. */
116 size = (size & ~(size-1)) - 1;
117
118 /* base == maxbase can be valid only if the BAR has
119 already been programmed with all 1s. */
120 if (base == maxbase && ((base | size) & mask) != mask)
121 return 0;
122
123 return size;
124 }
125
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 u32 mem_type;
129 unsigned long flags;
130
131 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 flags |= IORESOURCE_IO;
134 return flags;
135 }
136
137 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 flags |= IORESOURCE_MEM;
139 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 flags |= IORESOURCE_PREFETCH;
141
142 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 switch (mem_type) {
144 case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 break;
146 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 /* 1M mem BAR treated as 32-bit BAR */
148 break;
149 case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 flags |= IORESOURCE_MEM_64;
151 break;
152 default:
153 /* mem unknown type treated as 32-bit BAR */
154 break;
155 }
156 return flags;
157 }
158
#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Sizes the BAR by writing all ones and reading back, then fills @res
 * with the decoded flags and CPU-side address range.  On any failure
 * @res->flags is cleared.
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;
	bool bar_too_big = false, bar_disabled = false;

	/* ROM BARs only decode the bits in PCI_ROM_ADDRESS_MASK. */
	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	/* Classic BAR sizing: save, write all ones, read back, restore. */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0. If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set. Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		/* Expansion ROM: preserve the enable bit in the flags. */
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		u64 l64 = l;
		u64 sz64 = sz;
		u64 mask64 = mask | (u64)~0 << 32;

		/* Size the upper half of the 64-bit BAR the same way. */
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		/* A 32-bit resource_size_t cannot represent a >4G BAR. */
		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
			bar_too_big = true;
			goto fail;
		}

		if ((sizeof(resource_size_t) < 8) && l) {
			/* Address above 32-bit boundary; disable the BAR */
			pci_write_config_dword(dev, pos, 0);
			pci_write_config_dword(dev, pos + 4, 0);
			region.start = 0;
			region.end = sz64;
			bar_disabled = true;
		} else {
			region.start = l64;
			region.end = l64 + sz64;
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		region.start = l;
		region.end = l + sz;
	}

	pcibios_bus_to_resource(dev, res, &region);
	pcibios_resource_to_bus(dev, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU. Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n",
			 pos, &region.start);
		res->flags |= IORESOURCE_UNSET;
		res->end -= res->start;
		res->start = 0;
	}

	goto out;


fail:
	res->flags = 0;
out:
	/* Re-enable decoding if we turned it off above. */
	if (!dev->mmio_always_on &&
	    (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (bar_too_big)
		dev_err(&dev->dev, "reg 0x%x: can't handle 64-bit BAR\n", pos);
	if (res->flags && !bar_disabled)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
311
/*
 * Probe @howmany standard BARs of @dev and, if @rom is non-zero, the
 * expansion ROM BAR at config offset @rom.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		/* __pci_read_base() returns 1 for a 64-bit BAR, which
		 * consumes the next BAR slot as well — skip it. */
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}
331
/*
 * Read the I/O window of the bridge above @child and record it in the
 * bus's resource[0].  A base > limit means the window is disabled.
 */
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	/* 32-bit I/O addressing: upper 16 bits live in separate registers. */
	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		/* The limit register addresses the last granule, not byte. */
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
371
/*
 * Read the non-prefetchable memory window of the bridge above @child
 * into the bus's resource[1].  Base/limit have 1 MiB granularity.
 */
static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		/* Limit register addresses the last 1 MiB block. */
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
393
/*
 * Read the prefetchable memory window of the bridge above @child into
 * the bus's resource[2].  The window may be 64-bit, with the upper 32
 * bits held in separate registers.
 */
static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them. If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((unsigned long) mem_base_hi) << 32;
			limit |= ((unsigned long) mem_limit_hi) << 32;
#else
			/* 32-bit kernel cannot represent a >4G window. */
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit "
					"address space for bridge\n");
				return;
			}
#endif
		}
	}
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
			IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		/* Limit register addresses the last 1 MiB block. */
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
443
/*
 * Populate @child's window resources from the config space of the
 * bridge leading to it.  For transparent bridges, additionally inherit
 * the parent bus's resources as subtractive-decode windows.
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	/* Start from a clean slate and point at the bridge's windows. */
	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
477
/* Allocate and minimally initialize a struct pci_bus; returns NULL on OOM. */
static struct pci_bus *pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
	return b;
}
495
/* Device-core release hook for a host bridge: run the owner's release
 * callback (if any), then free its window list and the bridge itself. */
static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
507
/* Allocate a host bridge tied to root bus @b; returns NULL on OOM. */
static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->bus = b;
	return bridge;
}
520
/* Map of the 4-bit PCI-X secondary-status frequency field to bus speeds. */
const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

/* Map of the PCIe link-speed field (LNKCAP/LNKSTA) to bus speeds. */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
558
/* Record the negotiated link speed of @bus from link status @linksta. */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
564
565 static unsigned char agp_speeds[] = {
566 AGP_UNKNOWN,
567 AGP_1X,
568 AGP_2X,
569 AGP_4X,
570 AGP_8X
571 };
572
573 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
574 {
575 int index = 0;
576
577 if (agpstat & 4)
578 index = 3;
579 else if (agpstat & 2)
580 index = 2;
581 else if (agpstat & 1)
582 index = 1;
583 else
584 goto out;
585
586 if (agp3) {
587 index += 2;
588 if (index == 5)
589 index = 0;
590 }
591
592 out:
593 return agp_speeds[index];
594 }
595
596
/*
 * Determine max and current speed of @bus by probing its upstream
 * bridge's AGP, PCI-X, or PCIe capability (checked in that order;
 * PCI-X, when present, is authoritative and returns early).
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		/* Status gives capability (max); command gives what's enabled. */
		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
				max = PCI_SPEED_133MHz_PCIX_ECC;
			} else {
				max = PCI_SPEED_133MHz_PCIX;
			}
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		/* Current frequency is encoded in bits 9:6 of the status. */
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (pos) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
656
657
/*
 * Allocate and register a child bus of @parent with number @busnr.
 * @bridge is the P2P bridge leading to the new bus, or NULL for a bus
 * with no backing bridge device.  Returns the new bus or NULL on OOM.
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	/* Subordinate is unknown yet; assume the maximum until scanned. */
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	/* Hold a reference on the bridge for the bus's lifetime;
	 * dropped in release_pcibus_dev(). */
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}
721
/* Allocate a child bus of @parent behind @dev and link it onto the
 * parent's children list under pci_bus_sem.  Returns NULL on failure. */
struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
734
/*
 * Grow the subordinate bus number of every ancestor bridge of @child
 * up to at least @max, so configuration cycles reach the new buses.
 * Only done when the platform reassigns all bus numbers.
 */
static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
	struct pci_bus *parent = child->parent;

	/* Attempts to fix that up are really dangerous unless
	   we're going to re-assign all bus numbers. */
	if (!pcibios_assign_all_busses())
		return;

	while (parent->parent && parent->busn_res.end < max) {
		parent->busn_res.end = max;
		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
		parent = parent->parent;
	}
}
750
/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 *
 * Returns the highest bus number assigned or found below @dev.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/* PCI_PRIMARY_BUS packs primary/secondary/subordinate in one dword. */
	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	/* Some bridges hard-wire primary to 0; adopt the parent's number. */
	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * If we already got to this bus through a different bridge,
		 * don't re-add it. This can happen with the i450NX chipset.
		 *
		 * However, we continue to descend down the hierarchy and
		 * scan remaining child buses.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->busn_res.end > max)
			max = child->busn_res.end;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, ++max);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max, 0xff);
		}
		/* Repack primary/secondary/subordinate into one dword. */
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)         <<  0)
		      | ((unsigned int)(child->busn_res.start)  <<  8)
		      | ((unsigned int)(child->busn_res.end)    << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the bios
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s "
				 "hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	/* Restore the bridge control register saved before probing. */
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}
963
/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	/* Only read the line register if the device actually uses a pin. */
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}
978
/* Detect the PCIe capability of @pdev and cache its offset, flags
 * register, and max payload size capability.  No-op for non-PCIe devices. */
void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->is_pcie = 1;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}
994
/* Mark @pdev as a hotplug bridge if its PCIe slot capability says so. */
void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}
1003
#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor,class,memory and IO-space addresses,IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u8 hdr_type;
	struct pci_slot *slot;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	/* Bit 7 of the header type flags a multi-function device. */
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	/* Associate the device with its slot, if one is registered. */
	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			/* progif bit 0: primary channel in legacy mode. */
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
			/* progif bit 2: secondary channel in legacy mode. */
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, "
			"ignoring device\n", dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
			"type %02x)\n", dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
1151
/*
 * Tear down per-device capability state allocated during enumeration:
 * VPD, SR-IOV, and the PCIe/PCI-X config-save buffers.  Called from the
 * device release path, so nothing may touch @dev afterwards.
 */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1158
1159 /**
1160 * pci_release_dev - free a pci device structure when all users of it are finished.
1161 * @dev: device that's been disconnected
1162 *
1163 * Will be called only by the device core when all users of this pci device are
1164 * done.
1165 */
1166 static void pci_release_dev(struct device *dev)
1167 {
1168 struct pci_dev *pci_dev;
1169
1170 pci_dev = to_pci_dev(dev);
1171 pci_release_capabilities(pci_dev);
1172 pci_release_of_node(pci_dev);
1173 pcibios_release_device(pci_dev);
1174 pci_bus_put(pci_dev->bus);
1175 kfree(pci_dev);
1176 }
1177
1178 /**
1179 * pci_cfg_space_size - get the configuration space size of the PCI device.
1180 * @dev: PCI device
1181 *
1182 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1183 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1184 * access it. Maybe we don't have a way to generate extended config space
1185 * accesses, or the device is behind a reverse Express bridge. So we try
1186 * reading the dword at 0x100 which must either be 0 or a valid extended
1187 * capability header.
1188 */
1189 int pci_cfg_space_size_ext(struct pci_dev *dev)
1190 {
1191 u32 status;
1192 int pos = PCI_CFG_SPACE_SIZE;
1193
1194 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1195 goto fail;
1196 if (status == 0xffffffff)
1197 goto fail;
1198
1199 return PCI_CFG_SPACE_EXP_SIZE;
1200
1201 fail:
1202 return PCI_CFG_SPACE_SIZE;
1203 }
1204
1205 int pci_cfg_space_size(struct pci_dev *dev)
1206 {
1207 int pos;
1208 u32 status;
1209 u16 class;
1210
1211 class = dev->class >> 8;
1212 if (class == PCI_CLASS_BRIDGE_HOST)
1213 return pci_cfg_space_size_ext(dev);
1214
1215 if (!pci_is_pcie(dev)) {
1216 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1217 if (!pos)
1218 goto fail;
1219
1220 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1221 if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1222 goto fail;
1223 }
1224
1225 return pci_cfg_space_size_ext(dev);
1226
1227 fail:
1228 return PCI_CFG_SPACE_SIZE;
1229 }
1230
1231 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1232 {
1233 struct pci_dev *dev;
1234
1235 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1236 if (!dev)
1237 return NULL;
1238
1239 INIT_LIST_HEAD(&dev->bus_list);
1240 dev->dev.type = &pci_dev_type;
1241 dev->bus = pci_bus_get(bus);
1242
1243 return dev;
1244 }
1245 EXPORT_SYMBOL(pci_alloc_dev);
1246
/* Legacy wrapper: allocate a pci_dev that is not attached to any bus */
struct pci_dev *alloc_pci_dev(void)
{
	return pci_alloc_dev(NULL);
}
EXPORT_SYMBOL(alloc_pci_dev);
1252
/*
 * Read the Vendor/Device ID dword at @devfn into *@l and decide whether a
 * device is present.  Returns false for an empty slot or a failed read.
 * While the device returns Configuration Request Retry Status (CRS,
 * reported here as vendor ID 0x0001 with all-ones device ID), retry with
 * exponentially increasing delays up to @crs_timeout milliseconds.
 */
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/* Configuration request Retry Status */
	while (*l == 0xffff0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;	/* exponential backoff between retries */
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
					"responding\n", pci_domain_nr(bus),
					bus->number, PCI_SLOT(devfn),
					PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1288
1289 /*
1290 * Read the config data for a PCI device, sanity-check it
1291 * and fill in the dev structure...
1292 */
1293 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1294 {
1295 struct pci_dev *dev;
1296 u32 l;
1297
1298 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1299 return NULL;
1300
1301 dev = pci_alloc_dev(bus);
1302 if (!dev)
1303 return NULL;
1304
1305 dev->devfn = devfn;
1306 dev->vendor = l & 0xffff;
1307 dev->device = (l >> 16) & 0xffff;
1308
1309 pci_set_of_node(dev);
1310
1311 if (pci_setup_device(dev)) {
1312 pci_bus_put(dev->bus);
1313 kfree(dev);
1314 return NULL;
1315 }
1316
1317 return dev;
1318 }
1319
/* Discover and initialize the optional capabilities of a new device */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}
1343
/**
 * pci_device_add - finish initializing a device and register it
 * @dev: newly scanned PCI device
 * @bus: bus the device lives on
 *
 * Sets up DMA defaults, applies header fixups, initializes capabilities,
 * links @dev onto @bus->devices, and registers it with the driver core.
 * Driver binding is deferred: match_driver is left false here.
 */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);

	pci_proc_attach_device(dev);
}
1389
1390 struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1391 {
1392 struct pci_dev *dev;
1393
1394 dev = pci_get_slot(bus, devfn);
1395 if (dev) {
1396 pci_dev_put(dev);
1397 return dev;
1398 }
1399
1400 dev = pci_scan_device(bus, devfn);
1401 if (!dev)
1402 return NULL;
1403
1404 pci_device_add(dev, bus);
1405
1406 return dev;
1407 }
1408 EXPORT_SYMBOL(pci_scan_single_device);
1409
1410 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1411 {
1412 int pos;
1413 u16 cap = 0;
1414 unsigned next_fn;
1415
1416 if (pci_ari_enabled(bus)) {
1417 if (!dev)
1418 return 0;
1419 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1420 if (!pos)
1421 return 0;
1422
1423 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1424 next_fn = PCI_ARI_CAP_NFN(cap);
1425 if (next_fn <= fn)
1426 return 0; /* protect against malformed list */
1427
1428 return next_fn;
1429 }
1430
1431 /* dev may be NULL for non-contiguous multifunction devices */
1432 if (!dev || dev->multifunction)
1433 return (fn + 1) % 8;
1434
1435 return 0;
1436 }
1437
1438 static int only_one_child(struct pci_bus *bus)
1439 {
1440 struct pci_dev *parent = bus->self;
1441
1442 if (!parent || !pci_is_pcie(parent))
1443 return 0;
1444 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1445 return 1;
1446 if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1447 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1448 return 1;
1449 return 0;
1450 }
1451
1452 /**
1453 * pci_scan_slot - scan a PCI slot on a bus for devices.
1454 * @bus: PCI bus to scan
1455 * @devfn: slot number to scan (must have zero function.)
1456 *
1457 * Scan a PCI slot on the specified PCI bus for devices, adding
1458 * discovered devices to the @bus->devices list. New devices
1459 * will not have is_added set.
1460 *
1461 * Returns the number of new devices found.
1462 */
1463 int pci_scan_slot(struct pci_bus *bus, int devfn)
1464 {
1465 unsigned fn, nr = 0;
1466 struct pci_dev *dev;
1467
1468 if (only_one_child(bus) && (devfn > 0))
1469 return 0; /* Already scanned the entire slot */
1470
1471 dev = pci_scan_single_device(bus, devfn);
1472 if (!dev)
1473 return 0;
1474 if (!dev->is_added)
1475 nr++;
1476
1477 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1478 dev = pci_scan_single_device(bus, devfn + fn);
1479 if (dev) {
1480 if (!dev->is_added)
1481 nr++;
1482 dev->multifunction = 1;
1483 }
1484 }
1485
1486 /* only one slot has pcie device */
1487 if (bus->self && nr)
1488 pcie_aspm_init_link_state(bus->self);
1489
1490 return nr;
1491 }
1492
/*
 * pci_walk_bus() callback: shrink *@data (the fabric-wide smallest MPS
 * supported, as a Device Control MPS encoding) to account for @dev.
 */
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	/* clamp to the smallest MPSS seen so far */
	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}
1524
1525 static void pcie_write_mps(struct pci_dev *dev, int mps)
1526 {
1527 int rc;
1528
1529 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1530 mps = 128 << dev->pcie_mpss;
1531
1532 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1533 dev->bus->self)
1534 /* For "Performance", the assumption is made that
1535 * downstream communication will never be larger than
1536 * the MRRS. So, the MPS only needs to be configured
1537 * for the upstream communication. This being the case,
1538 * walk from the top down and set the MPS of the child
1539 * to that of the parent bus.
1540 *
1541 * Configure the device MPS with the smaller of the
1542 * device MPSS or the bridge MPS (which is assumed to be
1543 * properly configured at this point to the largest
1544 * allowable MPS based on its parent bus).
1545 */
1546 mps = min(mps, pcie_get_mps(dev->bus->self));
1547 }
1548
1549 rc = pcie_set_mps(dev, mps);
1550 if (rc)
1551 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1552 }
1553
1554 static void pcie_write_mrrs(struct pci_dev *dev)
1555 {
1556 int rc, mrrs;
1557
1558 /* In the "safe" case, do not configure the MRRS. There appear to be
1559 * issues with setting MRRS to 0 on a number of devices.
1560 */
1561 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1562 return;
1563
1564 /* For Max performance, the MRRS must be set to the largest supported
1565 * value. However, it cannot be configured larger than the MPS the
1566 * device or the bus can support. This should already be properly
1567 * configured by a prior call to pcie_write_mps.
1568 */
1569 mrrs = pcie_get_mps(dev);
1570
1571 /* MRRS is a R/W register. Invalid values can be written, but a
1572 * subsequent read will verify if the value is acceptable or not.
1573 * If the MRRS value provided is not acceptable (e.g., too large),
1574 * shrink the value until it is acceptable to the HW.
1575 */
1576 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1577 rc = pcie_set_readrq(dev, mrrs);
1578 if (!rc)
1579 break;
1580
1581 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1582 mrrs /= 2;
1583 }
1584
1585 if (mrrs < 128)
1586 dev_err(&dev->dev, "MRRS was unable to be configured with a "
1587 "safe value. If problems are experienced, try running "
1588 "with pci=pcie_bus_safe.\n");
1589 }
1590
1591 static void pcie_bus_detect_mps(struct pci_dev *dev)
1592 {
1593 struct pci_dev *bridge = dev->bus->self;
1594 int mps, p_mps;
1595
1596 if (!bridge)
1597 return;
1598
1599 mps = pcie_get_mps(dev);
1600 p_mps = pcie_get_mps(bridge);
1601
1602 if (mps != p_mps)
1603 dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1604 mps, pci_name(bridge), p_mps);
1605 }
1606
/*
 * pci_walk_bus() callback: apply the MPS policy to @dev.  @data points to
 * the target MPS as a Device Control encoding (payload = 128 << *data).
 * In PCIE_BUS_TUNE_OFF mode only mismatches are reported.
 */
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		pcie_bus_detect_mps(dev);
		return 0;
	}

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), "
		 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}
1631
1632 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1633 * parents then children fashion. If this changes, then this code will not
1634 * work as designed.
1635 */
1636 void pcie_bus_configure_settings(struct pci_bus *bus)
1637 {
1638 u8 smpss;
1639
1640 if (!bus->self)
1641 return;
1642
1643 if (!pci_is_pcie(bus->self))
1644 return;
1645
1646 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
1647 * to be aware of the MPS of the destination. To work around this,
1648 * simply force the MPS of the entire system to the smallest possible.
1649 */
1650 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1651 smpss = 0;
1652
1653 if (pcie_bus_config == PCIE_BUS_SAFE) {
1654 smpss = bus->self->pcie_mpss;
1655
1656 pcie_find_smpss(bus->self, &smpss);
1657 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1658 }
1659
1660 pcie_bus_configure_set(bus->self, &smpss);
1661 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1662 }
1663 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1664
/*
 * pci_scan_child_bus - scan all slots on @bus and recurse into bridges.
 *
 * Probes every device/function on the bus, applies arch fixups once per
 * bus, then walks the discovered devices twice (pass 0 and pass 1) so
 * pci_scan_bridge() can first handle already-configured bridges and then
 * assign bus numbers to the rest.  Returns the highest subordinate bus
 * number found.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	for (pass=0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
1706
1707 /**
1708 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1709 * @bridge: Host bridge to set up.
1710 *
1711 * Default empty implementation. Replace with an architecture-specific setup
1712 * routine, if necessary.
1713 */
1714 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1715 {
1716 return 0;
1717 }
1718
/* Default no-op; architectures may override to hook new-bus registration */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
1722
/* Default no-op; architectures may override to hook bus removal */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
1726
/*
 * pci_create_root_bus - create and register a root PCI bus
 * @parent: parent device for the host bridge (may be NULL)
 * @bus: bus number of the new root bus
 * @ops: config-access operations for the bus
 * @sysdata: arch-private data attached to the bus
 * @resources: list of pci_host_bridge_window entries; moved onto the
 *	bridge and added to the bus as root resources
 *
 * Returns the new bus, or NULL if the bus already exists or any
 * allocation/registration step fails.  Does NOT scan the bus; callers
 * use pci_scan_child_bus() (or pci_scan_root_bus()) for that.
 */
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct pci_host_bridge_window *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		/* not yet registered, so a plain kfree() is sufficient */
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		/* device_register failed: drop the initial reference */
		put_device(&bridge->dev);
		goto err_out;
	}
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	list_for_each_entry_safe(window, n, resources, list) {
		list_move_tail(&window->list, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			/* report the CPU->bus address translation, if any */
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	/* drop the b->bridge reference, then unregister the bridge device */
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}
1830
1831 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
1832 {
1833 struct resource *res = &b->busn_res;
1834 struct resource *parent_res, *conflict;
1835
1836 res->start = bus;
1837 res->end = bus_max;
1838 res->flags = IORESOURCE_BUS;
1839
1840 if (!pci_is_root_bus(b))
1841 parent_res = &b->parent->busn_res;
1842 else {
1843 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
1844 res->flags |= IORESOURCE_PCI_FIXED;
1845 }
1846
1847 conflict = insert_resource_conflict(parent_res, res);
1848
1849 if (conflict)
1850 dev_printk(KERN_DEBUG, &b->dev,
1851 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
1852 res, pci_is_root_bus(b) ? "domain " : "",
1853 parent_res, conflict->name, conflict);
1854
1855 return conflict == NULL;
1856 }
1857
/*
 * Shrink or grow the end of @b's bus-number aperture to @bus_max.
 * Returns 0 on success or a negative errno from adjust_resource().
 */
int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;	/* snapshot for the log message */
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
			"busn_res: %pR end %s updated to %02x\n",
			&old_res, ret ? "can not be" : "is", bus_max);

	/* if the resource was never inserted, insert it now */
	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}
1879
1880 void pci_bus_release_busn_res(struct pci_bus *b)
1881 {
1882 struct resource *res = &b->busn_res;
1883 int ret;
1884
1885 if (!res->flags || !res->parent)
1886 return;
1887
1888 ret = release_resource(res);
1889 dev_printk(KERN_DEBUG, &b->dev,
1890 "busn_res: %pR %s released\n",
1891 res, ret ? "can not be" : "is");
1892 }
1893
1894 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1895 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1896 {
1897 struct pci_host_bridge_window *window;
1898 bool found = false;
1899 struct pci_bus *b;
1900 int max;
1901
1902 list_for_each_entry(window, resources, list)
1903 if (window->res->flags & IORESOURCE_BUS) {
1904 found = true;
1905 break;
1906 }
1907
1908 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
1909 if (!b)
1910 return NULL;
1911
1912 if (!found) {
1913 dev_info(&b->dev,
1914 "No busn resource found for root bus, will use [bus %02x-ff]\n",
1915 bus);
1916 pci_bus_insert_busn_res(b, bus, 255);
1917 }
1918
1919 max = pci_scan_child_bus(b);
1920
1921 if (!found)
1922 pci_bus_update_busn_res_end(b, max);
1923
1924 pci_bus_add_devices(b);
1925 return b;
1926 }
1927 EXPORT_SYMBOL(pci_scan_root_bus);
1928
1929 /* Deprecated; use pci_scan_root_bus() instead */
1930 struct pci_bus *pci_scan_bus_parented(struct device *parent,
1931 int bus, struct pci_ops *ops, void *sysdata)
1932 {
1933 LIST_HEAD(resources);
1934 struct pci_bus *b;
1935
1936 pci_add_resource(&resources, &ioport_resource);
1937 pci_add_resource(&resources, &iomem_resource);
1938 pci_add_resource(&resources, &busn_resource);
1939 b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
1940 if (b)
1941 pci_scan_child_bus(b);
1942 else
1943 pci_free_resource_list(&resources);
1944 return b;
1945 }
1946 EXPORT_SYMBOL(pci_scan_bus_parented);
1947
1948 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
1949 void *sysdata)
1950 {
1951 LIST_HEAD(resources);
1952 struct pci_bus *b;
1953
1954 pci_add_resource(&resources, &ioport_resource);
1955 pci_add_resource(&resources, &iomem_resource);
1956 pci_add_resource(&resources, &busn_resource);
1957 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1958 if (b) {
1959 pci_scan_child_bus(b);
1960 pci_bus_add_devices(b);
1961 } else {
1962 pci_free_resource_list(&resources);
1963 }
1964 return b;
1965 }
1966 EXPORT_SYMBOL(pci_scan_bus);
1967
1968 /**
1969 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1970 * @bridge: PCI bridge for the bus to scan
1971 *
1972 * Scan a PCI bus and child buses for new devices, add them,
1973 * and enable them, resizing bridge mmio/io resource if necessary
1974 * and possible. The caller must ensure the child devices are already
1975 * removed for resizing to occur.
1976 *
1977 * Returns the max number of subordinate bus discovered.
1978 */
1979 unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
1980 {
1981 unsigned int max;
1982 struct pci_bus *bus = bridge->subordinate;
1983
1984 max = pci_scan_child_bus(bus);
1985
1986 pci_assign_unassigned_bridge_resources(bridge);
1987
1988 pci_bus_add_devices(bus);
1989
1990 return max;
1991 }
1992
1993 /**
1994 * pci_rescan_bus - scan a PCI bus for devices.
1995 * @bus: PCI bus to scan
1996 *
1997 * Scan a PCI bus and child buses for new devices, adds them,
1998 * and enables them.
1999 *
2000 * Returns the max number of subordinate bus discovered.
2001 */
2002 unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
2003 {
2004 unsigned int max;
2005
2006 max = pci_scan_child_bus(bus);
2007 pci_assign_unassigned_bus_resources(bus);
2008 pci_bus_add_devices(bus);
2009
2010 return max;
2011 }
2012 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2013
2014 EXPORT_SYMBOL(pci_add_new_bus);
2015 EXPORT_SYMBOL(pci_scan_slot);
2016 EXPORT_SYMBOL(pci_scan_bridge);
2017 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2018
2019 static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
2020 {
2021 const struct pci_dev *a = to_pci_dev(d_a);
2022 const struct pci_dev *b = to_pci_dev(d_b);
2023
2024 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2025 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2026
2027 if (a->bus->number < b->bus->number) return -1;
2028 else if (a->bus->number > b->bus->number) return 1;
2029
2030 if (a->devfn < b->devfn) return -1;
2031 else if (a->devfn > b->devfn) return 1;
2032
2033 return 0;
2034 }
2035
/* Re-sort all PCI devices on the bus type in breadth-first (BDF) order */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
This page took 0.093461 seconds and 5 git commands to generate.