drivers/pci/probe.c
1 /*
2 * probe.c - PCI detection and setup code
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15
16 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR 3
18
19 struct resource busn_resource = {
20 .name = "PCI busn",
21 .start = 0,
22 .end = 255,
23 .flags = IORESOURCE_BUS,
24 };
25
26 /* Ugh. Need to stop exporting this to modules. */
27 LIST_HEAD(pci_root_buses);
28 EXPORT_SYMBOL(pci_root_buses);
29
30 static LIST_HEAD(pci_domain_busn_res_list);
31
32 struct pci_domain_busn_res {
33 struct list_head list;
34 struct resource res;
35 int domain_nr;
36 };
37
38 static struct resource *get_pci_domain_busn_res(int domain_nr)
39 {
40 struct pci_domain_busn_res *r;
41
42 list_for_each_entry(r, &pci_domain_busn_res_list, list)
43 if (r->domain_nr == domain_nr)
44 return &r->res;
45
46 r = kzalloc(sizeof(*r), GFP_KERNEL);
47 if (!r)
48 return NULL;
49
50 r->domain_nr = domain_nr;
51 r->res.start = 0;
52 r->res.end = 0xff;
53 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
54
55 list_add_tail(&r->list, &pci_domain_busn_res_list);
56
57 return &r->res;
58 }
59
60 static int find_anything(struct device *dev, void *data)
61 {
62 return 1;
63 }
64
65 /*
66 * Some device drivers need to know if the PCI subsystem is initialized.
67 * Basically, we consider PCI uninitialized when there is no device
68 * registered on pci_bus_type yet.
69 */
70 int no_pci_devices(void)
71 {
72 struct device *dev;
73 int no_devices;
74
75 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
76 no_devices = (dev == NULL);
77 put_device(dev);
78 return no_devices;
79 }
80 EXPORT_SYMBOL(no_pci_devices);
81
82 /*
83 * PCI Bus Class
84 */
85 static void release_pcibus_dev(struct device *dev)
86 {
87 struct pci_bus *pci_bus = to_pci_bus(dev);
88
89 if (pci_bus->bridge)
90 put_device(pci_bus->bridge);
91 pci_bus_remove_resources(pci_bus);
92 pci_release_bus_of_node(pci_bus);
93 kfree(pci_bus);
94 }
95
96 static struct class pcibus_class = {
97 .name = "pci_bus",
98 .dev_release = &release_pcibus_dev,
99 .dev_attrs = pcibus_dev_attrs,
100 };
101
102 static int __init pcibus_class_init(void)
103 {
104 return class_register(&pcibus_class);
105 }
106 postcore_initcall(pcibus_class_init);
107
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 u64 size = mask & maxbase; /* Find the significant bits */
111 if (!size)
112 return 0;
113
114 /* Get the lowest of them to find the decode size, and
115 from that the extent. */
116 size = (size & ~(size-1)) - 1;
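/*
 * For example, a 1 MiB 32-bit memory BAR reads back 0xfff00000 in its
 * size bits after the all-ones write: the lowest set bit is 0x00100000,
 * so size becomes 0x000fffff and the window spans base..base + 1 MiB - 1.
 */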
117
118 /* base == maxbase can be valid only if the BAR has
119 already been programmed with all 1s. */
120 if (base == maxbase && ((base | size) & mask) != mask)
121 return 0;
122
123 return size;
124 }
125
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 u32 mem_type;
129 unsigned long flags;
130
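/*
 * BAR bit 0 selects I/O vs. memory space; for memory BARs, bits 2:1
 * encode the type (32-bit, below 1M, or 64-bit) and bit 3 marks the
 * region prefetchable.
 */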
131 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 flags |= IORESOURCE_IO;
134 return flags;
135 }
136
137 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 flags |= IORESOURCE_MEM;
139 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 flags |= IORESOURCE_PREFETCH;
141
142 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 switch (mem_type) {
144 case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 break;
146 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 /* 1M mem BAR treated as 32-bit BAR */
148 break;
149 case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 flags |= IORESOURCE_MEM_64;
151 break;
152 default:
153 /* mem unknown type treated as 32-bit BAR */
154 break;
155 }
156 return flags;
157 }
158
159 /**
160 * __pci_read_base - read a PCI BAR
161 * @dev: the PCI device
162 * @type: type of the BAR
163 * @res: resource buffer to be filled in
164 * @pos: BAR position in the config space
165 *
166 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
167 */
168 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
169 struct resource *res, unsigned int pos)
170 {
171 u32 l, sz, mask;
172 u16 orig_cmd;
173 struct pci_bus_region region;
174 bool bar_too_big = false, bar_disabled = false;
175
176 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
177
178 /* No printks while decoding is disabled! */
179 if (!dev->mmio_always_on) {
180 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
181 pci_write_config_word(dev, PCI_COMMAND,
182 orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
183 }
184
185 res->name = pci_name(dev);
186
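/*
 * Standard BAR sizing handshake: save the original value, write all
 * ones, read back the size mask, then restore the original value.
 */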
187 pci_read_config_dword(dev, pos, &l);
188 pci_write_config_dword(dev, pos, l | mask);
189 pci_read_config_dword(dev, pos, &sz);
190 pci_write_config_dword(dev, pos, l);
191
192 /*
193 * All bits set in sz means the device isn't working properly.
194 * If the BAR isn't implemented, all bits must be 0. If it's a
195 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
196 * 1 must be clear.
197 */
198 if (!sz || sz == 0xffffffff)
199 goto fail;
200
201 /*
202 * I don't know how l can have all bits set. Copied from old code.
203 * Maybe it fixes a bug on some ancient platform.
204 */
205 if (l == 0xffffffff)
206 l = 0;
207
208 if (type == pci_bar_unknown) {
209 res->flags = decode_bar(dev, l);
210 res->flags |= IORESOURCE_SIZEALIGN;
211 if (res->flags & IORESOURCE_IO) {
212 l &= PCI_BASE_ADDRESS_IO_MASK;
213 mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
214 } else {
215 l &= PCI_BASE_ADDRESS_MEM_MASK;
216 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
217 }
218 } else {
219 res->flags |= (l & IORESOURCE_ROM_ENABLE);
220 l &= PCI_ROM_ADDRESS_MASK;
221 mask = (u32)PCI_ROM_ADDRESS_MASK;
222 }
223
224 if (res->flags & IORESOURCE_MEM_64) {
225 u64 l64 = l;
226 u64 sz64 = sz;
227 u64 mask64 = mask | (u64)~0 << 32;
228
229 pci_read_config_dword(dev, pos + 4, &l);
230 pci_write_config_dword(dev, pos + 4, ~0);
231 pci_read_config_dword(dev, pos + 4, &sz);
232 pci_write_config_dword(dev, pos + 4, l);
233
234 l64 |= ((u64)l << 32);
235 sz64 |= ((u64)sz << 32);
236
237 sz64 = pci_size(l64, sz64, mask64);
238
239 if (!sz64)
240 goto fail;
241
242 if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
243 bar_too_big = true;
244 goto fail;
245 }
246
247 if ((sizeof(resource_size_t) < 8) && l) {
248 /* Address above 32-bit boundary; disable the BAR */
249 pci_write_config_dword(dev, pos, 0);
250 pci_write_config_dword(dev, pos + 4, 0);
251 region.start = 0;
252 region.end = sz64;
253 pcibios_bus_to_resource(dev, res, &region);
254 bar_disabled = true;
255 } else {
256 region.start = l64;
257 region.end = l64 + sz64;
258 pcibios_bus_to_resource(dev, res, &region);
259 }
260 } else {
261 sz = pci_size(l, sz, mask);
262
263 if (!sz)
264 goto fail;
265
266 region.start = l;
267 region.end = l + sz;
268 pcibios_bus_to_resource(dev, res, &region);
269 }
270
271 goto out;
272
273
274 fail:
275 res->flags = 0;
276 out:
277 if (!dev->mmio_always_on)
278 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
279
280 if (bar_too_big)
281 dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
282 if (res->flags && !bar_disabled)
283 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
284
285 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
286 }
287
288 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
289 {
290 unsigned int pos, reg;
291
292 for (pos = 0; pos < howmany; pos++) {
293 struct resource *res = &dev->resource[pos];
294 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
295 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
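/* A 64-bit BAR consumes two slots; __pci_read_base() returns 1 for it,
   so the extra increment skips the upper half. */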
296 }
297
298 if (rom) {
299 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
300 dev->rom_base_reg = rom;
301 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
302 IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
303 IORESOURCE_SIZEALIGN;
304 __pci_read_base(dev, pci_bar_mem32, res, rom);
305 }
306 }
307
308 static void __devinit pci_read_bridge_io(struct pci_bus *child)
309 {
310 struct pci_dev *dev = child->self;
311 u8 io_base_lo, io_limit_lo;
312 unsigned long io_mask, io_granularity, base, limit;
313 struct pci_bus_region region;
314 struct resource *res;
315
316 io_mask = PCI_IO_RANGE_MASK;
317 io_granularity = 0x1000;
318 if (dev->io_window_1k) {
319 /* Support 1K I/O space granularity */
320 io_mask = PCI_IO_1K_RANGE_MASK;
321 io_granularity = 0x400;
322 }
323
324 res = child->resource[0];
325 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
326 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
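/*
 * The low base/limit registers hold address bits 15:12 (15:10 with 1K
 * granularity); shifting left by 8 recovers the window boundaries.
 */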
327 base = (io_base_lo & io_mask) << 8;
328 limit = (io_limit_lo & io_mask) << 8;
329
330 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
331 u16 io_base_hi, io_limit_hi;
332
333 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
334 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
335 base |= ((unsigned long) io_base_hi << 16);
336 limit |= ((unsigned long) io_limit_hi << 16);
337 }
338
339 if (base <= limit) {
340 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
341 region.start = base;
342 region.end = limit + io_granularity - 1;
343 pcibios_bus_to_resource(dev, res, &region);
344 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
345 }
346 }
347
348 static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
349 {
350 struct pci_dev *dev = child->self;
351 u16 mem_base_lo, mem_limit_lo;
352 unsigned long base, limit;
353 struct pci_bus_region region;
354 struct resource *res;
355
356 res = child->resource[1];
357 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
358 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
359 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
360 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
361 if (base <= limit) {
362 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
363 region.start = base;
364 region.end = limit + 0xfffff;
365 pcibios_bus_to_resource(dev, res, &region);
366 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
367 }
368 }
369
370 static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
371 {
372 struct pci_dev *dev = child->self;
373 u16 mem_base_lo, mem_limit_lo;
374 unsigned long base, limit;
375 struct pci_bus_region region;
376 struct resource *res;
377
378 res = child->resource[2];
379 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
380 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
381 base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
382 limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
383
384 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
385 u32 mem_base_hi, mem_limit_hi;
386
387 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
388 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
389
390 /*
391 * Some bridges set the base > limit by default, and some
392 * (broken) BIOSes do not initialize them. If we find
393 * this, just assume they are not being used.
394 */
395 if (mem_base_hi <= mem_limit_hi) {
396 #if BITS_PER_LONG == 64
397 base |= ((unsigned long) mem_base_hi) << 32;
398 limit |= ((unsigned long) mem_limit_hi) << 32;
399 #else
400 if (mem_base_hi || mem_limit_hi) {
401 dev_err(&dev->dev, "can't handle 64-bit "
402 "address space for bridge\n");
403 return;
404 }
405 #endif
406 }
407 }
408 if (base <= limit) {
409 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
410 IORESOURCE_MEM | IORESOURCE_PREFETCH;
411 if (res->flags & PCI_PREF_RANGE_TYPE_64)
412 res->flags |= IORESOURCE_MEM_64;
413 region.start = base;
414 region.end = limit + 0xfffff;
415 pcibios_bus_to_resource(dev, res, &region);
416 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
417 }
418 }
419
420 void __devinit pci_read_bridge_bases(struct pci_bus *child)
421 {
422 struct pci_dev *dev = child->self;
423 struct resource *res;
424 int i;
425
426 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
427 return;
428
429 dev_info(&dev->dev, "PCI bridge to %pR%s\n",
430 &child->busn_res,
431 dev->transparent ? " (subtractive decode)" : "");
432
433 pci_bus_remove_resources(child);
434 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
435 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
436
437 pci_read_bridge_io(child);
438 pci_read_bridge_mmio(child);
439 pci_read_bridge_mmio_pref(child);
440
441 if (dev->transparent) {
442 pci_bus_for_each_resource(child->parent, res, i) {
443 if (res) {
444 pci_bus_add_resource(child, res,
445 PCI_SUBTRACTIVE_DECODE);
446 dev_printk(KERN_DEBUG, &dev->dev,
447 " bridge window %pR (subtractive decode)\n",
448 res);
449 }
450 }
451 }
452 }
453
454 static struct pci_bus * pci_alloc_bus(void)
455 {
456 struct pci_bus *b;
457
458 b = kzalloc(sizeof(*b), GFP_KERNEL);
459 if (b) {
460 INIT_LIST_HEAD(&b->node);
461 INIT_LIST_HEAD(&b->children);
462 INIT_LIST_HEAD(&b->devices);
463 INIT_LIST_HEAD(&b->slots);
464 INIT_LIST_HEAD(&b->resources);
465 b->max_bus_speed = PCI_SPEED_UNKNOWN;
466 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
467 }
468 return b;
469 }
470
471 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
472 {
473 struct pci_host_bridge *bridge;
474
475 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
476 if (bridge) {
477 INIT_LIST_HEAD(&bridge->windows);
478 bridge->bus = b;
479 }
480
481 return bridge;
482 }
483
484 static unsigned char pcix_bus_speed[] = {
485 PCI_SPEED_UNKNOWN, /* 0 */
486 PCI_SPEED_66MHz_PCIX, /* 1 */
487 PCI_SPEED_100MHz_PCIX, /* 2 */
488 PCI_SPEED_133MHz_PCIX, /* 3 */
489 PCI_SPEED_UNKNOWN, /* 4 */
490 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
491 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
492 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
493 PCI_SPEED_UNKNOWN, /* 8 */
494 PCI_SPEED_66MHz_PCIX_266, /* 9 */
495 PCI_SPEED_100MHz_PCIX_266, /* A */
496 PCI_SPEED_133MHz_PCIX_266, /* B */
497 PCI_SPEED_UNKNOWN, /* C */
498 PCI_SPEED_66MHz_PCIX_533, /* D */
499 PCI_SPEED_100MHz_PCIX_533, /* E */
500 PCI_SPEED_133MHz_PCIX_533 /* F */
501 };
502
503 static unsigned char pcie_link_speed[] = {
504 PCI_SPEED_UNKNOWN, /* 0 */
505 PCIE_SPEED_2_5GT, /* 1 */
506 PCIE_SPEED_5_0GT, /* 2 */
507 PCIE_SPEED_8_0GT, /* 3 */
508 PCI_SPEED_UNKNOWN, /* 4 */
509 PCI_SPEED_UNKNOWN, /* 5 */
510 PCI_SPEED_UNKNOWN, /* 6 */
511 PCI_SPEED_UNKNOWN, /* 7 */
512 PCI_SPEED_UNKNOWN, /* 8 */
513 PCI_SPEED_UNKNOWN, /* 9 */
514 PCI_SPEED_UNKNOWN, /* A */
515 PCI_SPEED_UNKNOWN, /* B */
516 PCI_SPEED_UNKNOWN, /* C */
517 PCI_SPEED_UNKNOWN, /* D */
518 PCI_SPEED_UNKNOWN, /* E */
519 PCI_SPEED_UNKNOWN /* F */
520 };
521
522 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
523 {
524 bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
525 }
526 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
527
528 static unsigned char agp_speeds[] = {
529 AGP_UNKNOWN,
530 AGP_1X,
531 AGP_2X,
532 AGP_4X,
533 AGP_8X
534 };
535
536 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
537 {
538 int index = 0;
539
540 if (agpstat & 4)
541 index = 3;
542 else if (agpstat & 2)
543 index = 2;
544 else if (agpstat & 1)
545 index = 1;
546 else
547 goto out;
548
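/*
 * In AGP 3.0 mode the same status bits report 4x/8x instead of
 * 1x/2x/4x, hence the +2 below; an out-of-range combination falls
 * back to unknown.
 */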
549 if (agp3) {
550 index += 2;
551 if (index == 5)
552 index = 0;
553 }
554
555 out:
556 return agp_speeds[index];
557 }
558
559
560 static void pci_set_bus_speed(struct pci_bus *bus)
561 {
562 struct pci_dev *bridge = bus->self;
563 int pos;
564
565 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
566 if (!pos)
567 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
568 if (pos) {
569 u32 agpstat, agpcmd;
570
571 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
572 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
573
574 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
575 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
576 }
577
578 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
579 if (pos) {
580 u16 status;
581 enum pci_bus_speed max;
582 pci_read_config_word(bridge, pos + 2, &status);
583
584 if (status & 0x8000) {
585 max = PCI_SPEED_133MHz_PCIX_533;
586 } else if (status & 0x4000) {
587 max = PCI_SPEED_133MHz_PCIX_266;
588 } else if (status & 0x0002) {
589 if (((status >> 12) & 0x3) == 2) {
590 max = PCI_SPEED_133MHz_PCIX_ECC;
591 } else {
592 max = PCI_SPEED_133MHz_PCIX;
593 }
594 } else {
595 max = PCI_SPEED_66MHz_PCIX;
596 }
597
598 bus->max_bus_speed = max;
599 bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];
600
601 return;
602 }
603
604 pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
605 if (pos) {
606 u32 linkcap;
607 u16 linksta;
608
609 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
610 bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];
611
612 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
613 pcie_update_link_speed(bus, linksta);
614 }
615 }
616
617
618 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
619 struct pci_dev *bridge, int busnr)
620 {
621 struct pci_bus *child;
622 int i;
623
624 /*
625 * Allocate a new bus, and inherit stuff from the parent..
626 */
627 child = pci_alloc_bus();
628 if (!child)
629 return NULL;
630
631 child->parent = parent;
632 child->ops = parent->ops;
633 child->sysdata = parent->sysdata;
634 child->bus_flags = parent->bus_flags;
635
636 /* initialize some portions of the bus device, but don't register it
637 * now as the parent is not properly set up yet. This device will get
638 * registered later in pci_bus_add_devices()
639 */
640 child->dev.class = &pcibus_class;
641 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
642
643 /*
644 * Set up the primary, secondary and subordinate
645 * bus numbers.
646 */
647 child->number = child->busn_res.start = busnr;
648 child->primary = parent->busn_res.start;
649 child->busn_res.end = 0xff;
650
651 if (!bridge)
652 return child;
653
654 child->self = bridge;
655 child->bridge = get_device(&bridge->dev);
656 pci_set_bus_of_node(child);
657 pci_set_bus_speed(child);
658
659 /* Set up default resource pointers and names.. */
660 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
661 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
662 child->resource[i]->name = child->name;
663 }
664 bridge->subordinate = child;
665
666 return child;
667 }
668
669 struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
670 {
671 struct pci_bus *child;
672
673 child = pci_alloc_child_bus(parent, dev, busnr);
674 if (child) {
675 down_write(&pci_bus_sem);
676 list_add_tail(&child->node, &parent->children);
677 up_write(&pci_bus_sem);
678 }
679 return child;
680 }
681
682 static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
683 {
684 struct pci_bus *parent = child->parent;
685
686 /* Attempts to fix that up are really dangerous unless
687 we're going to re-assign all bus numbers. */
688 if (!pcibios_assign_all_busses())
689 return;
690
691 while (parent->parent && parent->busn_res.end < max) {
692 parent->busn_res.end = max;
693 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
694 parent = parent->parent;
695 }
696 }
697
698 /*
699 * If it's a bridge, configure it and scan the bus behind it.
700 * For CardBus bridges, we don't scan behind as the devices will
701 * be handled by the bridge driver itself.
702 *
703 * We need to process bridges in two passes -- first we scan those
704 * already configured by the BIOS and after we are done with all of
705 * them, we proceed to assigning numbers to the remaining buses in
706 * order to avoid overlaps between old and new bus numbers.
707 */
708 int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
709 {
710 struct pci_bus *child;
711 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
712 u32 buses, i, j = 0;
713 u16 bctl;
714 u8 primary, secondary, subordinate;
715 int broken = 0;
716
717 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
718 primary = buses & 0xFF;
719 secondary = (buses >> 8) & 0xFF;
720 subordinate = (buses >> 16) & 0xFF;
721
722 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
723 secondary, subordinate, pass);
724
725 if (!primary && (primary != bus->number) && secondary && subordinate) {
726 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
727 primary = bus->number;
728 }
729
730 /* Check if setup is sensible at all */
731 if (!pass &&
732 (primary != bus->number || secondary <= bus->number ||
733 secondary > subordinate)) {
734 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
735 secondary, subordinate);
736 broken = 1;
737 }
738
739 /* Disable MasterAbortMode during probing to avoid reporting
740 of bus errors (on some architectures) */
741 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
742 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
743 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
744
745 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
746 !is_cardbus && !broken) {
747 unsigned int cmax;
748 /*
749 * Bus already configured by firmware, process it in the first
750 * pass and just note the configuration.
751 */
752 if (pass)
753 goto out;
754
755 /*
756 * If we already got to this bus through a different bridge,
757 * don't re-add it. This can happen with the i450NX chipset.
758 *
759 * However, we continue to descend down the hierarchy and
760 * scan remaining child buses.
761 */
762 child = pci_find_bus(pci_domain_nr(bus), secondary);
763 if (!child) {
764 child = pci_add_new_bus(bus, dev, secondary);
765 if (!child)
766 goto out;
767 child->primary = primary;
768 pci_bus_insert_busn_res(child, secondary, subordinate);
769 child->bridge_ctl = bctl;
770 }
771
772 cmax = pci_scan_child_bus(child);
773 if (cmax > max)
774 max = cmax;
775 if (child->busn_res.end > max)
776 max = child->busn_res.end;
777 } else {
778 /*
779 * We need to assign a number to this bus which we always
780 * do in the second pass.
781 */
782 if (!pass) {
783 if (pcibios_assign_all_busses() || broken)
784 /* Temporarily disable forwarding of the
785 configuration cycles on all bridges in
786 this bus segment to avoid possible
787 conflicts in the second pass between two
788 bridges programmed with overlapping
789 bus ranges. */
790 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
791 buses & ~0xffffff);
792 goto out;
793 }
794
795 /* Clear errors */
796 pci_write_config_word(dev, PCI_STATUS, 0xffff);
797
798 /* Prevent assigning a bus number that already exists.
799 * This can happen when a bridge is hot-plugged, so in
800 * this case we only re-scan this bus. */
801 child = pci_find_bus(pci_domain_nr(bus), max+1);
802 if (!child) {
803 child = pci_add_new_bus(bus, dev, ++max);
804 if (!child)
805 goto out;
806 pci_bus_insert_busn_res(child, max, 0xff);
807 }
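/*
 * PCI_PRIMARY_BUS layout: primary bus in byte 0, secondary in byte 1,
 * subordinate in byte 2, with the secondary latency timer in byte 3.
 */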
808 buses = (buses & 0xff000000)
809 | ((unsigned int)(child->primary) << 0)
810 | ((unsigned int)(child->busn_res.start) << 8)
811 | ((unsigned int)(child->busn_res.end) << 16);
812
813 /*
814 * yenta.c forces a secondary latency timer of 176.
815 * Copy that behaviour here.
816 */
817 if (is_cardbus) {
818 buses &= ~0xff000000;
819 buses |= CARDBUS_LATENCY_TIMER << 24;
820 }
821
822 /*
823 * We need to blast all three values with a single write.
824 */
825 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
826
827 if (!is_cardbus) {
828 child->bridge_ctl = bctl;
829 /*
830 * Adjust subordinate busnr in parent buses.
831 * We do this before scanning for children because
832 * some devices may not be detected if the BIOS
833 * was lazy.
834 */
835 pci_fixup_parent_subordinate_busnr(child, max);
836 /* Now we can scan all subordinate buses... */
837 max = pci_scan_child_bus(child);
838 /*
839 * now fix it up again since we have found
840 * the real value of max.
841 */
842 pci_fixup_parent_subordinate_busnr(child, max);
843 } else {
844 /*
845 * For CardBus bridges, we leave 4 bus numbers
846 * as cards with a PCI-to-PCI bridge can be
847 * inserted later.
848 */
849 for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
850 struct pci_bus *parent = bus;
851 if (pci_find_bus(pci_domain_nr(bus),
852 max+i+1))
853 break;
854 while (parent->parent) {
855 if ((!pcibios_assign_all_busses()) &&
856 (parent->busn_res.end > max) &&
857 (parent->busn_res.end <= max+i)) {
858 j = 1;
859 }
860 parent = parent->parent;
861 }
862 if (j) {
863 /*
864 * Often, there are two cardbus bridges
865 * -- try to leave one valid bus number
866 * for each one.
867 */
868 i /= 2;
869 break;
870 }
871 }
872 max += i;
873 pci_fixup_parent_subordinate_busnr(child, max);
874 }
875 /*
876 * Set the subordinate bus number to its real value.
877 */
878 pci_bus_update_busn_res_end(child, max);
879 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
880 }
881
882 sprintf(child->name,
883 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
884 pci_domain_nr(bus), child->number);
885
886 /* Has only triggered on CardBus, fixup is in yenta_socket */
887 while (bus->parent) {
888 if ((child->busn_res.end > bus->busn_res.end) ||
889 (child->number > bus->busn_res.end) ||
890 (child->number < bus->number) ||
891 (child->busn_res.end < bus->number)) {
892 dev_info(&child->dev, "%pR %s "
893 "hidden behind%s bridge %s %pR\n",
894 &child->busn_res,
895 (bus->number > child->busn_res.end &&
896 bus->busn_res.end < child->number) ?
897 "wholly" : "partially",
898 bus->self->transparent ? " transparent" : "",
899 dev_name(&bus->dev),
900 &bus->busn_res);
901 }
902 bus = bus->parent;
903 }
904
905 out:
906 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
907
908 return max;
909 }
910
911 /*
912 * Read interrupt line and base address registers.
913 * The architecture-dependent code can tweak these, of course.
914 */
915 static void pci_read_irq(struct pci_dev *dev)
916 {
917 unsigned char irq;
918
919 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
920 dev->pin = irq;
921 if (irq)
922 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
923 dev->irq = irq;
924 }
925
926 void set_pcie_port_type(struct pci_dev *pdev)
927 {
928 int pos;
929 u16 reg16;
930
931 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
932 if (!pos)
933 return;
934 pdev->is_pcie = 1;
935 pdev->pcie_cap = pos;
936 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
937 pdev->pcie_flags_reg = reg16;
938 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
939 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
940 }
941
942 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
943 {
944 u32 reg32;
945
946 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
947 if (reg32 & PCI_EXP_SLTCAP_HPC)
948 pdev->is_hotplug_bridge = 1;
949 }
950
951 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
952
953 /**
954 * pci_setup_device - fill in class and map information of a device
955 * @dev: the device structure to fill
956 *
957 * Initialize the device structure with information about the device's
958 * vendor, class, memory and I/O-space addresses, IRQ lines, etc.
959 * Called at initialisation of the PCI subsystem and by CardBus services.
960 * Returns 0 on success and negative if unknown type of device (not normal,
961 * bridge or CardBus).
962 */
963 int pci_setup_device(struct pci_dev *dev)
964 {
965 u32 class;
966 u8 hdr_type;
967 struct pci_slot *slot;
968 int pos = 0;
969 struct pci_bus_region region;
970 struct resource *res;
971
972 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
973 return -EIO;
974
975 dev->sysdata = dev->bus->sysdata;
976 dev->dev.parent = dev->bus->bridge;
977 dev->dev.bus = &pci_bus_type;
978 dev->hdr_type = hdr_type & 0x7f;
979 dev->multifunction = !!(hdr_type & 0x80);
980 dev->error_state = pci_channel_io_normal;
981 set_pcie_port_type(dev);
982
983 list_for_each_entry(slot, &dev->bus->slots, list)
984 if (PCI_SLOT(dev->devfn) == slot->number)
985 dev->slot = slot;
986
987 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
988 set this higher, assuming the system even supports it. */
989 dev->dma_mask = 0xffffffff;
990
991 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
992 dev->bus->number, PCI_SLOT(dev->devfn),
993 PCI_FUNC(dev->devfn));
994
995 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
996 dev->revision = class & 0xff;
997 dev->class = class >> 8; /* upper 3 bytes */
998
999 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1000 dev->vendor, dev->device, dev->hdr_type, dev->class);
1001
1002 /* need to have dev->class ready */
1003 dev->cfg_size = pci_cfg_space_size(dev);
1004
1005 /* "Unknown power state" */
1006 dev->current_state = PCI_UNKNOWN;
1007
1008 /* Early fixups, before probing the BARs */
1009 pci_fixup_device(pci_fixup_early, dev);
1010 /* device class may be changed after fixup */
1011 class = dev->class >> 8;
1012
1013 switch (dev->hdr_type) { /* header type */
1014 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1015 if (class == PCI_CLASS_BRIDGE_PCI)
1016 goto bad;
1017 pci_read_irq(dev);
1018 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1019 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1020 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1021
1022 /*
1023 * Do the ugly legacy mode stuff here rather than broken chip
1024 * quirk code. Legacy mode ATA controllers have fixed
1025 * addresses. These are not always echoed in BAR0-3, and
1026 * BAR0-3 in a few cases contain junk!
1027 */
1028 if (class == PCI_CLASS_STORAGE_IDE) {
1029 u8 progif;
1030 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1031 if ((progif & 1) == 0) {
1032 region.start = 0x1F0;
1033 region.end = 0x1F7;
1034 res = &dev->resource[0];
1035 res->flags = LEGACY_IO_RESOURCE;
1036 pcibios_bus_to_resource(dev, res, &region);
1037 region.start = 0x3F6;
1038 region.end = 0x3F6;
1039 res = &dev->resource[1];
1040 res->flags = LEGACY_IO_RESOURCE;
1041 pcibios_bus_to_resource(dev, res, &region);
1042 }
1043 if ((progif & 4) == 0) {
1044 region.start = 0x170;
1045 region.end = 0x177;
1046 res = &dev->resource[2];
1047 res->flags = LEGACY_IO_RESOURCE;
1048 pcibios_bus_to_resource(dev, res, &region);
1049 region.start = 0x376;
1050 region.end = 0x376;
1051 res = &dev->resource[3];
1052 res->flags = LEGACY_IO_RESOURCE;
1053 pcibios_bus_to_resource(dev, res, &region);
1054 }
1055 }
1056 break;
1057
1058 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1059 if (class != PCI_CLASS_BRIDGE_PCI)
1060 goto bad;
1061 /* The PCI-to-PCI bridge spec requires that subtractive
1062 decoding (i.e. transparent) bridge must have programming
1063 interface code of 0x01. */
1064 pci_read_irq(dev);
1065 dev->transparent = ((dev->class & 0xff) == 1);
1066 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1067 set_pcie_hotplug_bridge(dev);
1068 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1069 if (pos) {
1070 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1071 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1072 }
1073 break;
1074
1075 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1076 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1077 goto bad;
1078 pci_read_irq(dev);
1079 pci_read_bases(dev, 1, 0);
1080 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1081 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1082 break;
1083
1084 default: /* unknown header */
1085 dev_err(&dev->dev, "unknown header type %02x, "
1086 "ignoring device\n", dev->hdr_type);
1087 return -EIO;
1088
1089 bad:
1090 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
1091 "type %02x)\n", dev->class, dev->hdr_type);
1092 dev->class = PCI_CLASS_NOT_DEFINED;
1093 }
1094
1095 /* We found a fine healthy device, go go go... */
1096 return 0;
1097 }
1098
1099 static void pci_release_capabilities(struct pci_dev *dev)
1100 {
1101 pci_vpd_release(dev);
1102 pci_iov_release(dev);
1103 pci_free_cap_save_buffers(dev);
1104 }
1105
1106 /**
1107 * pci_release_dev - free a pci device structure when all users of it are finished.
1108 * @dev: device that's been disconnected
1109 *
1110 * Will be called only by the device core when all users of this pci device are
1111 * done.
1112 */
1113 static void pci_release_dev(struct device *dev)
1114 {
1115 struct pci_dev *pci_dev;
1116
1117 pci_dev = to_pci_dev(dev);
1118 pci_release_capabilities(pci_dev);
1119 pci_release_of_node(pci_dev);
1120 kfree(pci_dev);
1121 }
1122
1123 /**
1124 * pci_cfg_space_size - get the configuration space size of the PCI device.
1125 * @dev: PCI device
1126 *
1127 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1128 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1129 * access it. Maybe we don't have a way to generate extended config space
1130 * accesses, or the device is behind a reverse Express bridge. So we try
1131 * reading the dword at 0x100 which must either be 0 or a valid extended
1132 * capability header.
1133 */
1134 int pci_cfg_space_size_ext(struct pci_dev *dev)
1135 {
1136 u32 status;
1137 int pos = PCI_CFG_SPACE_SIZE;
1138
1139 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1140 goto fail;
1141 if (status == 0xffffffff)
1142 goto fail;
1143
1144 return PCI_CFG_SPACE_EXP_SIZE;
1145
1146 fail:
1147 return PCI_CFG_SPACE_SIZE;
1148 }
1149
1150 int pci_cfg_space_size(struct pci_dev *dev)
1151 {
1152 int pos;
1153 u32 status;
1154 u16 class;
1155
1156 class = dev->class >> 8;
1157 if (class == PCI_CLASS_BRIDGE_HOST)
1158 return pci_cfg_space_size_ext(dev);
1159
1160 if (!pci_is_pcie(dev)) {
1161 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1162 if (!pos)
1163 goto fail;
1164
1165 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1166 if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1167 goto fail;
1168 }
1169
1170 return pci_cfg_space_size_ext(dev);
1171
1172 fail:
1173 return PCI_CFG_SPACE_SIZE;
1174 }
1175
1176 static void pci_release_bus_bridge_dev(struct device *dev)
1177 {
1178 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
1179
1180 if (bridge->release_fn)
1181 bridge->release_fn(bridge);
1182
1183 pci_free_resource_list(&bridge->windows);
1184
1185 kfree(bridge);
1186 }
1187
1188 struct pci_dev *alloc_pci_dev(void)
1189 {
1190 struct pci_dev *dev;
1191
1192 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1193 if (!dev)
1194 return NULL;
1195
1196 INIT_LIST_HEAD(&dev->bus_list);
1197
1198 return dev;
1199 }
1200 EXPORT_SYMBOL(alloc_pci_dev);
1201
1202 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1203 int crs_timeout)
1204 {
1205 int delay = 1;
1206
1207 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1208 return false;
1209
1210 /* some broken boards return 0 or ~0 if a slot is empty: */
1211 if (*l == 0xffffffff || *l == 0x00000000 ||
1212 *l == 0x0000ffff || *l == 0xffff0000)
1213 return false;
1214
1215 /* Configuration request Retry Status */
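/*
 * With CRS Software Visibility enabled, the root complex synthesizes a
 * vendor ID of 0x0001 (device ID 0xffff) while the device is still
 * initializing, hence the 0xffff0001 value checked here.
 */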
1216 while (*l == 0xffff0001) {
1217 if (!crs_timeout)
1218 return false;
1219
1220 msleep(delay);
1221 delay *= 2;
1222 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1223 return false;
1224 /* Card hasn't responded in 60 seconds? Must be stuck. */
1225 if (delay > crs_timeout) {
1226 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
1227 "responding\n", pci_domain_nr(bus),
1228 bus->number, PCI_SLOT(devfn),
1229 PCI_FUNC(devfn));
1230 return false;
1231 }
1232 }
1233
1234 return true;
1235 }
1236 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1237
1238 /*
1239 * Read the config data for a PCI device, sanity-check it
1240 * and fill in the dev structure...
1241 */
1242 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1243 {
1244 struct pci_dev *dev;
1245 u32 l;
1246
1247 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1248 return NULL;
1249
1250 dev = alloc_pci_dev();
1251 if (!dev)
1252 return NULL;
1253
1254 dev->bus = bus;
1255 dev->devfn = devfn;
1256 dev->vendor = l & 0xffff;
1257 dev->device = (l >> 16) & 0xffff;
1258
1259 pci_set_of_node(dev);
1260
1261 if (pci_setup_device(dev)) {
1262 kfree(dev);
1263 return NULL;
1264 }
1265
1266 return dev;
1267 }
1268
1269 static void pci_init_capabilities(struct pci_dev *dev)
1270 {
1271 /* MSI/MSI-X list */
1272 pci_msi_init_pci_dev(dev);
1273
1274 /* Buffers for saving PCIe and PCI-X capabilities */
1275 pci_allocate_cap_save_buffers(dev);
1276
1277 /* Power Management */
1278 pci_pm_init(dev);
1279 platform_pci_wakeup_init(dev);
1280
1281 /* Vital Product Data */
1282 pci_vpd_pci22_init(dev);
1283
1284 /* Alternative Routing-ID Forwarding */
1285 pci_enable_ari(dev);
1286
1287 /* Single Root I/O Virtualization */
1288 pci_iov_init(dev);
1289
1290 /* Enable ACS P2P upstream forwarding */
1291 pci_enable_acs(dev);
1292 }
1293
1294 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1295 {
1296 device_initialize(&dev->dev);
1297 dev->dev.release = pci_release_dev;
1298 pci_dev_get(dev);
1299
1300 dev->dev.dma_mask = &dev->dma_mask;
1301 dev->dev.dma_parms = &dev->dma_parms;
1302 dev->dev.coherent_dma_mask = 0xffffffffull;
1303
1304 pci_set_dma_max_seg_size(dev, 65536);
1305 pci_set_dma_seg_boundary(dev, 0xffffffff);
1306
1307 /* Fix up broken headers */
1308 pci_fixup_device(pci_fixup_header, dev);
1309
1310 /* moved out from quirk header fixup code */
1311 pci_reassigndev_resource_alignment(dev);
1312
1313 /* Clear the state_saved flag. */
1314 dev->state_saved = false;
1315
1316 /* Initialize various capabilities */
1317 pci_init_capabilities(dev);
1318
1319 /*
1320 * Add the device to our list of discovered devices
1321 * and the bus list for fixup functions, etc.
1322 */
1323 down_write(&pci_bus_sem);
1324 list_add_tail(&dev->bus_list, &bus->devices);
1325 up_write(&pci_bus_sem);
1326 }
1327
1328 struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1329 {
1330 struct pci_dev *dev;
1331
1332 dev = pci_get_slot(bus, devfn);
1333 if (dev) {
1334 pci_dev_put(dev);
1335 return dev;
1336 }
1337
1338 dev = pci_scan_device(bus, devfn);
1339 if (!dev)
1340 return NULL;
1341
1342 pci_device_add(dev, bus);
1343
1344 return dev;
1345 }
1346 EXPORT_SYMBOL(pci_scan_single_device);
1347
1348 static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
1349 {
1350 u16 cap;
1351 unsigned pos, next_fn;
1352
1353 if (!dev)
1354 return 0;
1355
1356 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1357 if (!pos)
1358 return 0;
1359 pci_read_config_word(dev, pos + 4, &cap);
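/* Bits 15:8 of the ARI Capability register hold the Next Function Number. */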
1360 next_fn = cap >> 8;
1361 if (next_fn <= fn)
1362 return 0;
1363 return next_fn;
1364 }
1365
1366 static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
1367 {
1368 return (fn + 1) % 8;
1369 }
1370
1371 static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
1372 {
1373 return 0;
1374 }
1375
1376 static int only_one_child(struct pci_bus *bus)
1377 {
1378 struct pci_dev *parent = bus->self;
1379
1380 if (!parent || !pci_is_pcie(parent))
1381 return 0;
1382 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1383 return 1;
1384 if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1385 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1386 return 1;
1387 return 0;
1388 }
1389
1390 /**
1391 * pci_scan_slot - scan a PCI slot on a bus for devices.
1392 * @bus: PCI bus to scan
1393 * @devfn: slot number to scan (must have function number zero)
1394 *
1395 * Scan a PCI slot on the specified PCI bus for devices, adding
1396 * discovered devices to the @bus->devices list. New devices
1397 * will not have is_added set.
1398 *
1399 * Returns the number of new devices found.
1400 */
1401 int pci_scan_slot(struct pci_bus *bus, int devfn)
1402 {
1403 unsigned fn, nr = 0;
1404 struct pci_dev *dev;
1405 unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;
1406
1407 if (only_one_child(bus) && (devfn > 0))
1408 return 0; /* Already scanned the entire slot */
1409
1410 dev = pci_scan_single_device(bus, devfn);
1411 if (!dev)
1412 return 0;
1413 if (!dev->is_added)
1414 nr++;
1415
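/*
 * ARI-enabled buses enumerate functions via the capability's Next
 * Function Number field; traditional multi-function devices simply
 * step through functions 1-7.
 */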
1416 if (pci_ari_enabled(bus))
1417 next_fn = next_ari_fn;
1418 else if (dev->multifunction)
1419 next_fn = next_trad_fn;
1420
1421 for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
1422 dev = pci_scan_single_device(bus, devfn + fn);
1423 if (dev) {
1424 if (!dev->is_added)
1425 nr++;
1426 dev->multifunction = 1;
1427 }
1428 }
1429
1430 /* Only one slot per PCIe link; initialize its ASPM state once devices have been found */
1431 if (bus->self && nr)
1432 pcie_aspm_init_link_state(bus->self);
1433
1434 return nr;
1435 }
1436
1437 static int pcie_find_smpss(struct pci_dev *dev, void *data)
1438 {
1439 u8 *smpss = data;
1440
1441 if (!pci_is_pcie(dev))
1442 return 0;
1443
1444 /* For PCIE hotplug enabled slots not connected directly to a
1445 * PCI-E root port, there can be problems when hotplugging
1446 * devices. This is due to the possibility of hotplugging a
1447 * device into the fabric with a smaller MPS than the devices
1448 * currently running have configured. Modifying the MPS on the
1449 * running devices could cause a fatal bus error due to an
1450 * incoming frame being larger than the newly configured MPS.
1451 * To work around this, the MPS for the entire fabric must be
1452 * set to the minimum size. Any devices hotplugged into this
1453 * fabric will have the minimum MPS set. If the PCI hotplug
1454 * slot is directly connected to the root port and there are no
1455 * other devices on the fabric (which seems to be the most
1456 * common case), then this is not an issue and MPS discovery
1457 * will occur as normal.
1458 */
1459 if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
1460 (dev->bus->self &&
1461 pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT)))
1462 *smpss = 0;
1463
1464 if (*smpss > dev->pcie_mpss)
1465 *smpss = dev->pcie_mpss;
1466
1467 return 0;
1468 }
1469
1470 static void pcie_write_mps(struct pci_dev *dev, int mps)
1471 {
1472 int rc;
1473
1474 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1475 mps = 128 << dev->pcie_mpss;
1476
1477 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1478 dev->bus->self)
1479 /* For "Performance", the assumption is made that
1480 * downstream communication will never be larger than
1481 * the MRRS. So, the MPS only needs to be configured
1482 * for the upstream communication. This being the case,
1483 * walk from the top down and set the MPS of the child
1484 * to that of the parent bus.
1485 *
1486 * Configure the device MPS with the smaller of the
1487 * device MPSS or the bridge MPS (which is assumed to be
1488 * properly configured at this point to the largest
1489 * allowable MPS based on its parent bus).
1490 */
1491 mps = min(mps, pcie_get_mps(dev->bus->self));
1492 }
1493
1494 rc = pcie_set_mps(dev, mps);
1495 if (rc)
1496 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1497 }
1498
1499 static void pcie_write_mrrs(struct pci_dev *dev)
1500 {
1501 int rc, mrrs;
1502
1503 /* In the "safe" case, do not configure the MRRS. There appear to be
1504 * issues with setting MRRS to 0 on a number of devices.
1505 */
1506 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1507 return;
1508
1509 /* For Max performance, the MRRS must be set to the largest supported
1510 * value. However, it cannot be configured larger than the MPS the
1511 * device or the bus can support. This should already be properly
1512 * configured by a prior call to pcie_write_mps.
1513 */
1514 mrrs = pcie_get_mps(dev);
1515
1516 /* MRRS is a R/W register. Invalid values can be written, but a
1517 * subsequent read will verify if the value is acceptable or not.
1518 * If the MRRS value provided is not acceptable (e.g., too large),
1519 * shrink the value until it is acceptable to the HW.
1520 */
1521 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1522 rc = pcie_set_readrq(dev, mrrs);
1523 if (!rc)
1524 break;
1525
1526 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1527 mrrs /= 2;
1528 }
1529
1530 if (mrrs < 128)
1531 dev_err(&dev->dev, "MRRS was unable to be configured with a "
1532 "safe value. If problems are experienced, try running "
1533 "with pci=pcie_bus_safe.\n");
1534 }
1535
1536 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1537 {
1538 int mps, orig_mps;
1539
1540 if (!pci_is_pcie(dev))
1541 return 0;
1542
1543 mps = 128 << *(u8 *)data;
1544 orig_mps = pcie_get_mps(dev);
1545
1546 pcie_write_mps(dev, mps);
1547 pcie_write_mrrs(dev);
1548
1549 dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
1550 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
1551 orig_mps, pcie_get_readrq(dev));
1552
1553 return 0;
1554 }
1555
1556 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1557 * parents then children fashion. If this changes, then this code will not
1558 * work as designed.
1559 */
1560 void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1561 {
1562 u8 smpss;
1563
1564 if (!pci_is_pcie(bus->self))
1565 return;
1566
1567 if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
1568 return;
1569
1570 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
1571 * to be aware of the MPS of the destination. To work around this,
1572 * simply force the MPS of the entire system to the smallest possible.
1573 */
1574 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1575 smpss = 0;
1576
1577 if (pcie_bus_config == PCIE_BUS_SAFE) {
1578 smpss = mpss;
1579
1580 pcie_find_smpss(bus->self, &smpss);
1581 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1582 }
1583
1584 pcie_bus_configure_set(bus->self, &smpss);
1585 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1586 }
1587 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1588
1589 unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1590 {
1591 unsigned int devfn, pass, max = bus->busn_res.start;
1592 struct pci_dev *dev;
1593
1594 dev_dbg(&bus->dev, "scanning bus\n");
1595
1596 /* Go find them, Rover! */
1597 for (devfn = 0; devfn < 0x100; devfn += 8)
1598 pci_scan_slot(bus, devfn);
1599
1600 /* Reserve buses for SR-IOV capability. */
1601 max += pci_iov_bus_range(bus);
1602
1603 /*
1604 * After performing arch-dependent fixup of the bus, look behind
1605 * all PCI-to-PCI bridges on this bus.
1606 */
1607 if (!bus->is_added) {
1608 dev_dbg(&bus->dev, "fixups for bus\n");
1609 pcibios_fixup_bus(bus);
1610 if (pci_is_root_bus(bus))
1611 bus->is_added = 1;
1612 }
1613
1614 for (pass=0; pass < 2; pass++)
1615 list_for_each_entry(dev, &bus->devices, bus_list) {
1616 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1617 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1618 max = pci_scan_bridge(bus, dev, max, pass);
1619 }
1620
1621 /*
1622 * We've scanned the bus and so we know all about what's on
1623 * the other side of any bridges that may be on this bus plus
1624 * any devices.
1625 *
1626 * Return how far we've got finding sub-buses.
1627 */
1628 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1629 return max;
1630 }
1631
1632 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1633 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1634 {
1635 int error;
1636 struct pci_host_bridge *bridge;
1637 struct pci_bus *b, *b2;
1638 struct pci_host_bridge_window *window, *n;
1639 struct resource *res;
1640 resource_size_t offset;
1641 char bus_addr[64];
1642 char *fmt;
1643
1644
1645 b = pci_alloc_bus();
1646 if (!b)
1647 return NULL;
1648
1649 b->sysdata = sysdata;
1650 b->ops = ops;
1651 b2 = pci_find_bus(pci_domain_nr(b), bus);
1652 if (b2) {
1653 /* If we already got to this bus through a different bridge, ignore it */
1654 dev_dbg(&b2->dev, "bus already known\n");
1655 goto err_out;
1656 }
1657
1658 bridge = pci_alloc_host_bridge(b);
1659 if (!bridge)
1660 goto err_out;
1661
1662 bridge->dev.parent = parent;
1663 bridge->dev.release = pci_release_bus_bridge_dev;
1664 dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1665 error = device_register(&bridge->dev);
1666 if (error)
1667 goto bridge_dev_reg_err;
1668 b->bridge = get_device(&bridge->dev);
1669 device_enable_async_suspend(b->bridge);
1670 pci_set_bus_of_node(b);
1671
1672 if (!parent)
1673 set_dev_node(b->bridge, pcibus_to_node(b));
1674
1675 b->dev.class = &pcibus_class;
1676 b->dev.parent = b->bridge;
1677 dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1678 error = device_register(&b->dev);
1679 if (error)
1680 goto class_dev_reg_err;
1681
1682 /* Create legacy_io and legacy_mem files for this bus */
1683 pci_create_legacy_files(b);
1684
1685 b->number = b->busn_res.start = bus;
1686
1687 if (parent)
1688 dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
1689 else
1690 printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1691
1692 /* Add initial resources to the bus */
1693 list_for_each_entry_safe(window, n, resources, list) {
1694 list_move_tail(&window->list, &bridge->windows);
1695 res = window->res;
1696 offset = window->offset;
1697 if (res->flags & IORESOURCE_BUS)
1698 pci_bus_insert_busn_res(b, bus, res->end);
1699 else
1700 pci_bus_add_resource(b, res, 0);
1701 if (offset) {
1702 if (resource_type(res) == IORESOURCE_IO)
1703 fmt = " (bus address [%#06llx-%#06llx])";
1704 else
1705 fmt = " (bus address [%#010llx-%#010llx])";
1706 snprintf(bus_addr, sizeof(bus_addr), fmt,
1707 (unsigned long long) (res->start - offset),
1708 (unsigned long long) (res->end - offset));
1709 } else
1710 bus_addr[0] = '\0';
1711 dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
1712 }
1713
1714 down_write(&pci_bus_sem);
1715 list_add_tail(&b->node, &pci_root_buses);
1716 up_write(&pci_bus_sem);
1717
1718 return b;
1719
1720 class_dev_reg_err:
1721 put_device(&bridge->dev);
1722 device_unregister(&bridge->dev);
1723 bridge_dev_reg_err:
1724 kfree(bridge);
1725 err_out:
1726 kfree(b);
1727 return NULL;
1728 }
1729
1730 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
1731 {
1732 struct resource *res = &b->busn_res;
1733 struct resource *parent_res, *conflict;
1734
1735 res->start = bus;
1736 res->end = bus_max;
1737 res->flags = IORESOURCE_BUS;
1738
1739 if (!pci_is_root_bus(b))
1740 parent_res = &b->parent->busn_res;
1741 else {
1742 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
1743 res->flags |= IORESOURCE_PCI_FIXED;
1744 }
1745
1746 conflict = insert_resource_conflict(parent_res, res);
1747
1748 if (conflict)
1749 dev_printk(KERN_DEBUG, &b->dev,
1750 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
1751 res, pci_is_root_bus(b) ? "domain " : "",
1752 parent_res, conflict->name, conflict);
1753
1754 return conflict == NULL;
1755 }
1756
1757 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
1758 {
1759 struct resource *res = &b->busn_res;
1760 struct resource old_res = *res;
1761 resource_size_t size;
1762 int ret;
1763
1764 if (res->start > bus_max)
1765 return -EINVAL;
1766
1767 size = bus_max - res->start + 1;
1768 ret = adjust_resource(res, res->start, size);
1769 dev_printk(KERN_DEBUG, &b->dev,
1770 "busn_res: %pR end %s updated to %02x\n",
1771 &old_res, ret ? "can not be" : "is", bus_max);
1772
1773 if (!ret && !res->parent)
1774 pci_bus_insert_busn_res(b, res->start, res->end);
1775
1776 return ret;
1777 }
1778
1779 void pci_bus_release_busn_res(struct pci_bus *b)
1780 {
1781 struct resource *res = &b->busn_res;
1782 int ret;
1783
1784 if (!res->flags || !res->parent)
1785 return;
1786
1787 ret = release_resource(res);
1788 dev_printk(KERN_DEBUG, &b->dev,
1789 "busn_res: %pR %s released\n",
1790 res, ret ? "can not be" : "is");
1791 }
1792
1793 struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus,
1794 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1795 {
1796 struct pci_host_bridge_window *window;
1797 bool found = false;
1798 struct pci_bus *b;
1799 int max;
1800
1801 list_for_each_entry(window, resources, list)
1802 if (window->res->flags & IORESOURCE_BUS) {
1803 found = true;
1804 break;
1805 }
1806
1807 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
1808 if (!b)
1809 return NULL;
1810
1811 if (!found) {
1812 dev_info(&b->dev,
1813 "No busn resource found for root bus, will use [bus %02x-ff]\n",
1814 bus);
1815 pci_bus_insert_busn_res(b, bus, 255);
1816 }
1817
1818 max = pci_scan_child_bus(b);
1819
1820 if (!found)
1821 pci_bus_update_busn_res_end(b, max);
1822
1823 pci_bus_add_devices(b);
1824 return b;
1825 }
1826 EXPORT_SYMBOL(pci_scan_root_bus);
1827
1828 /* Deprecated; use pci_scan_root_bus() instead */
1829 struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
1830 int bus, struct pci_ops *ops, void *sysdata)
1831 {
1832 LIST_HEAD(resources);
1833 struct pci_bus *b;
1834
1835 pci_add_resource(&resources, &ioport_resource);
1836 pci_add_resource(&resources, &iomem_resource);
1837 pci_add_resource(&resources, &busn_resource);
1838 b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
1839 if (b)
1840 pci_scan_child_bus(b);
1841 else
1842 pci_free_resource_list(&resources);
1843 return b;
1844 }
1845 EXPORT_SYMBOL(pci_scan_bus_parented);
1846
1847 struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
1848 void *sysdata)
1849 {
1850 LIST_HEAD(resources);
1851 struct pci_bus *b;
1852
1853 pci_add_resource(&resources, &ioport_resource);
1854 pci_add_resource(&resources, &iomem_resource);
1855 pci_add_resource(&resources, &busn_resource);
1856 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1857 if (b) {
1858 pci_scan_child_bus(b);
1859 pci_bus_add_devices(b);
1860 } else {
1861 pci_free_resource_list(&resources);
1862 }
1863 return b;
1864 }
1865 EXPORT_SYMBOL(pci_scan_bus);
1866
1867 #ifdef CONFIG_HOTPLUG
1868 /**
1869 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1870 * @bridge: PCI bridge for the bus to scan
1871 *
1872 * Scan a PCI bus and child buses for new devices, add them,
1873 * and enable them, resizing bridge mmio/io resource if necessary
1874 * and possible. The caller must ensure the child devices are already
1875 * removed for resizing to occur.
1876 *
1877 * Returns the max number of subordinate bus discovered.
1878 */
1879 unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
1880 {
1881 unsigned int max;
1882 struct pci_bus *bus = bridge->subordinate;
1883
1884 max = pci_scan_child_bus(bus);
1885
1886 pci_assign_unassigned_bridge_resources(bridge);
1887
1888 pci_bus_add_devices(bus);
1889
1890 return max;
1891 }
1892
1893 EXPORT_SYMBOL(pci_add_new_bus);
1894 EXPORT_SYMBOL(pci_scan_slot);
1895 EXPORT_SYMBOL(pci_scan_bridge);
1896 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1897 #endif
1898
1899 static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
1900 {
1901 const struct pci_dev *a = to_pci_dev(d_a);
1902 const struct pci_dev *b = to_pci_dev(d_b);
1903
1904 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
1905 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
1906
1907 if (a->bus->number < b->bus->number) return -1;
1908 else if (a->bus->number > b->bus->number) return 1;
1909
1910 if (a->devfn < b->devfn) return -1;
1911 else if (a->devfn > b->devfn) return 1;
1912
1913 return 0;
1914 }
1915
1916 void __init pci_sort_breadthfirst(void)
1917 {
1918 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
1919 }