drivers/pci/probe.c
1 /*
2 * probe.c - PCI detection and setup code
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15
16 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR 3
18
19 struct resource busn_resource = {
20 .name = "PCI busn",
21 .start = 0,
22 .end = 255,
23 .flags = IORESOURCE_BUS,
24 };
25
26 /* Ugh. Need to stop exporting this to modules. */
27 LIST_HEAD(pci_root_buses);
28 EXPORT_SYMBOL(pci_root_buses);
29
30 static LIST_HEAD(pci_domain_busn_res_list);
31
32 struct pci_domain_busn_res {
33 struct list_head list;
34 struct resource res;
35 int domain_nr;
36 };
37
38 static struct resource *get_pci_domain_busn_res(int domain_nr)
39 {
40 struct pci_domain_busn_res *r;
41
42 list_for_each_entry(r, &pci_domain_busn_res_list, list)
43 if (r->domain_nr == domain_nr)
44 return &r->res;
45
46 r = kzalloc(sizeof(*r), GFP_KERNEL);
47 if (!r)
48 return NULL;
49
50 r->domain_nr = domain_nr;
51 r->res.start = 0;
52 r->res.end = 0xff;
53 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
54
55 list_add_tail(&r->list, &pci_domain_busn_res_list);
56
57 return &r->res;
58 }
59
60 static int find_anything(struct device *dev, void *data)
61 {
62 return 1;
63 }
64
65 /*
66  * Some device drivers need to know whether PCI has been initialized.
67  * Basically, we consider PCI uninitialized when no device has yet
68  * been found on the pci_bus_type.
69 */
70 int no_pci_devices(void)
71 {
72 struct device *dev;
73 int no_devices;
74
75 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
76 no_devices = (dev == NULL);
77 put_device(dev);
78 return no_devices;
79 }
80 EXPORT_SYMBOL(no_pci_devices);
81
82 /*
83 * PCI Bus Class
84 */
85 static void release_pcibus_dev(struct device *dev)
86 {
87 struct pci_bus *pci_bus = to_pci_bus(dev);
88
89 if (pci_bus->bridge)
90 put_device(pci_bus->bridge);
91 pci_bus_remove_resources(pci_bus);
92 pci_release_bus_of_node(pci_bus);
93 kfree(pci_bus);
94 }
95
96 static struct class pcibus_class = {
97 .name = "pci_bus",
98 .dev_release = &release_pcibus_dev,
99 .dev_attrs = pcibus_dev_attrs,
100 };
101
102 static int __init pcibus_class_init(void)
103 {
104 return class_register(&pcibus_class);
105 }
106 postcore_initcall(pcibus_class_init);
107
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 u64 size = mask & maxbase; /* Find the significant bits */
111 if (!size)
112 return 0;
113
114 /* Isolate the lowest set bit to find the decode size, and
115 from that the extent. */
116 size = (size & ~(size-1)) - 1;
117
118 /* base == maxbase can be valid only if the BAR has
119 already been programmed with all 1s. */
120 if (base == maxbase && ((base | size) & mask) != mask)
121 return 0;
122
123 return size;
124 }
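/*
 * Worked example (illustrative, not part of the original file): sizing a
 * 1 MiB 32-bit memory BAR.  After the probe writes all 1s the device
 * returns maxbase = 0xfff00000, and the memory mask is 0xfffffff0, so:
 *
 *	size = 0xfffffff0 & 0xfff00000      = 0xfff00000
 *	size & ~(size - 1)                  = 0x00100000  (lowest set bit)
 *	(size & ~(size - 1)) - 1            = 0x000fffff  (1 MiB - 1)
 *
 * i.e. pci_size() returns the resource length minus one, which the
 * callers below add to the start address to form region.end.
 */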
125
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 u32 mem_type;
129 unsigned long flags;
130
131 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 flags |= IORESOURCE_IO;
134 return flags;
135 }
136
137 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 flags |= IORESOURCE_MEM;
139 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 flags |= IORESOURCE_PREFETCH;
141
142 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 switch (mem_type) {
144 case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 break;
146 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 /* 1M mem BAR treated as 32-bit BAR */
148 break;
149 case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 flags |= IORESOURCE_MEM_64;
151 break;
152 default:
153 /* mem unknown type treated as 32-bit BAR */
154 break;
155 }
156 return flags;
157 }
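/*
 * Illustrative decode (hypothetical raw BAR values, not taken from any
 * particular device): a BAR reading 0xf000000c has bit 0 clear (memory
 * space), type bits 2:1 = 10b (64-bit) and bit 3 set (prefetchable), so
 * decode_bar() yields IORESOURCE_MEM | IORESOURCE_PREFETCH |
 * IORESOURCE_MEM_64.  A BAR reading 0x0000e001 has bit 0 set and decodes
 * to IORESOURCE_IO instead.
 */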
158
159 /**
160  * __pci_read_base - read a PCI BAR
161 * @dev: the PCI device
162 * @type: type of the BAR
163 * @res: resource buffer to be filled in
164 * @pos: BAR position in the config space
165 *
166 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
167 */
168 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
169 struct resource *res, unsigned int pos)
170 {
171 u32 l, sz, mask;
172 u16 orig_cmd;
173 struct pci_bus_region region;
174 bool bar_too_big = false, bar_disabled = false;
175
176 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
177
178 /* No printks while decoding is disabled! */
179 if (!dev->mmio_always_on) {
180 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
181 pci_write_config_word(dev, PCI_COMMAND,
182 orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
183 }
184
185 res->name = pci_name(dev);
186
187 pci_read_config_dword(dev, pos, &l);
188 pci_write_config_dword(dev, pos, l | mask);
189 pci_read_config_dword(dev, pos, &sz);
190 pci_write_config_dword(dev, pos, l);
191
192 /*
193 * All bits set in sz means the device isn't working properly.
194 * If the BAR isn't implemented, all bits must be 0. If it's a
195 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
196 * 1 must be clear.
197 */
198 if (!sz || sz == 0xffffffff)
199 goto fail;
200
201 /*
202 * I don't know how l can have all bits set. Copied from old code.
203 * Maybe it fixes a bug on some ancient platform.
204 */
205 if (l == 0xffffffff)
206 l = 0;
207
208 if (type == pci_bar_unknown) {
209 res->flags = decode_bar(dev, l);
210 res->flags |= IORESOURCE_SIZEALIGN;
211 if (res->flags & IORESOURCE_IO) {
212 l &= PCI_BASE_ADDRESS_IO_MASK;
213 mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
214 } else {
215 l &= PCI_BASE_ADDRESS_MEM_MASK;
216 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
217 }
218 } else {
219 res->flags |= (l & IORESOURCE_ROM_ENABLE);
220 l &= PCI_ROM_ADDRESS_MASK;
221 mask = (u32)PCI_ROM_ADDRESS_MASK;
222 }
223
224 if (res->flags & IORESOURCE_MEM_64) {
225 u64 l64 = l;
226 u64 sz64 = sz;
227 u64 mask64 = mask | (u64)~0 << 32;
228
229 pci_read_config_dword(dev, pos + 4, &l);
230 pci_write_config_dword(dev, pos + 4, ~0);
231 pci_read_config_dword(dev, pos + 4, &sz);
232 pci_write_config_dword(dev, pos + 4, l);
233
234 l64 |= ((u64)l << 32);
235 sz64 |= ((u64)sz << 32);
236
237 sz64 = pci_size(l64, sz64, mask64);
238
239 if (!sz64)
240 goto fail;
241
242 if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
243 bar_too_big = true;
244 goto fail;
245 }
246
247 if ((sizeof(resource_size_t) < 8) && l) {
248 /* Address above 32-bit boundary; disable the BAR */
249 pci_write_config_dword(dev, pos, 0);
250 pci_write_config_dword(dev, pos + 4, 0);
251 region.start = 0;
252 region.end = sz64;
253 pcibios_bus_to_resource(dev, res, &region);
254 bar_disabled = true;
255 } else {
256 region.start = l64;
257 region.end = l64 + sz64;
258 pcibios_bus_to_resource(dev, res, &region);
259 }
260 } else {
261 sz = pci_size(l, sz, mask);
262
263 if (!sz)
264 goto fail;
265
266 region.start = l;
267 region.end = l + sz;
268 pcibios_bus_to_resource(dev, res, &region);
269 }
270
271 goto out;
272
273
274 fail:
275 res->flags = 0;
276 out:
277 if (!dev->mmio_always_on)
278 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
279
280 if (bar_too_big)
281 dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
282 if (res->flags && !bar_disabled)
283 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
284
285 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
286 }
287
288 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
289 {
290 unsigned int pos, reg;
291
292 for (pos = 0; pos < howmany; pos++) {
293 struct resource *res = &dev->resource[pos];
294 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
295 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
296 }
297
298 if (rom) {
299 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
300 dev->rom_base_reg = rom;
301 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
302 IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
303 IORESOURCE_SIZEALIGN;
304 __pci_read_base(dev, pci_bar_mem32, res, rom);
305 }
306 }
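/*
 * Layout note (illustrative): the six type 0 header BARs sit at config
 * offsets 0x10, 0x14, ... 0x24, hence reg = PCI_BASE_ADDRESS_0 +
 * (pos << 2) above.  When __pci_read_base() reports a 64-bit BAR it
 * returns 1, the loop advances pos one extra step, and the following
 * 4-byte slot (the upper half of the 64-bit address) is skipped instead
 * of being probed as a BAR of its own.
 */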
307
308 static void pci_read_bridge_io(struct pci_bus *child)
309 {
310 struct pci_dev *dev = child->self;
311 u8 io_base_lo, io_limit_lo;
312 unsigned long io_mask, io_granularity, base, limit;
313 struct pci_bus_region region;
314 struct resource *res;
315
316 io_mask = PCI_IO_RANGE_MASK;
317 io_granularity = 0x1000;
318 if (dev->io_window_1k) {
319 /* Support 1K I/O space granularity */
320 io_mask = PCI_IO_1K_RANGE_MASK;
321 io_granularity = 0x400;
322 }
323
324 res = child->resource[0];
325 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
326 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
327 base = (io_base_lo & io_mask) << 8;
328 limit = (io_limit_lo & io_mask) << 8;
329
330 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
331 u16 io_base_hi, io_limit_hi;
332
333 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
334 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
335 base |= ((unsigned long) io_base_hi << 16);
336 limit |= ((unsigned long) io_limit_hi << 16);
337 }
338
339 if (base <= limit) {
340 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
341 region.start = base;
342 region.end = limit + io_granularity - 1;
343 pcibios_bus_to_resource(dev, res, &region);
344 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
345 }
346 }
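/*
 * Worked example (illustrative values): with the default 4K granularity,
 * io_base_lo = 0x21 and io_limit_lo = 0x31 give
 *
 *	base  = (0x21 & 0xf0) << 8 = 0x2000
 *	limit = (0x31 & 0xf0) << 8 = 0x3000
 *
 * so the bridge forwards [io 0x2000-0x3fff] once io_granularity - 1 is
 * added to the limit.  The low nibble (0x1 here) is the range type and
 * says whether the UPPER16 registers extend the window to 32-bit I/O
 * addresses.
 */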
347
348 static void pci_read_bridge_mmio(struct pci_bus *child)
349 {
350 struct pci_dev *dev = child->self;
351 u16 mem_base_lo, mem_limit_lo;
352 unsigned long base, limit;
353 struct pci_bus_region region;
354 struct resource *res;
355
356 res = child->resource[1];
357 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
358 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
359 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
360 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
361 if (base <= limit) {
362 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
363 region.start = base;
364 region.end = limit + 0xfffff;
365 pcibios_bus_to_resource(dev, res, &region);
366 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
367 }
368 }
369
370 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
371 {
372 struct pci_dev *dev = child->self;
373 u16 mem_base_lo, mem_limit_lo;
374 unsigned long base, limit;
375 struct pci_bus_region region;
376 struct resource *res;
377
378 res = child->resource[2];
379 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
380 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
381 base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
382 limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
383
384 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
385 u32 mem_base_hi, mem_limit_hi;
386
387 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
388 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
389
390 /*
391 * Some bridges set the base > limit by default, and some
392 * (broken) BIOSes do not initialize them. If we find
393 * this, just assume they are not being used.
394 */
395 if (mem_base_hi <= mem_limit_hi) {
396 #if BITS_PER_LONG == 64
397 base |= ((unsigned long) mem_base_hi) << 32;
398 limit |= ((unsigned long) mem_limit_hi) << 32;
399 #else
400 if (mem_base_hi || mem_limit_hi) {
401 dev_err(&dev->dev, "can't handle 64-bit "
402 "address space for bridge\n");
403 return;
404 }
405 #endif
406 }
407 }
408 if (base <= limit) {
409 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
410 IORESOURCE_MEM | IORESOURCE_PREFETCH;
411 if (res->flags & PCI_PREF_RANGE_TYPE_64)
412 res->flags |= IORESOURCE_MEM_64;
413 region.start = base;
414 region.end = limit + 0xfffff;
415 pcibios_bus_to_resource(dev, res, &region);
416 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
417 }
418 }
419
420 void pci_read_bridge_bases(struct pci_bus *child)
421 {
422 struct pci_dev *dev = child->self;
423 struct resource *res;
424 int i;
425
426 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
427 return;
428
429 dev_info(&dev->dev, "PCI bridge to %pR%s\n",
430 &child->busn_res,
431 dev->transparent ? " (subtractive decode)" : "");
432
433 pci_bus_remove_resources(child);
434 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
435 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
436
437 pci_read_bridge_io(child);
438 pci_read_bridge_mmio(child);
439 pci_read_bridge_mmio_pref(child);
440
441 if (dev->transparent) {
442 pci_bus_for_each_resource(child->parent, res, i) {
443 if (res) {
444 pci_bus_add_resource(child, res,
445 PCI_SUBTRACTIVE_DECODE);
446 dev_printk(KERN_DEBUG, &dev->dev,
447 " bridge window %pR (subtractive decode)\n",
448 res);
449 }
450 }
451 }
452 }
453
454 static struct pci_bus * pci_alloc_bus(void)
455 {
456 struct pci_bus *b;
457
458 b = kzalloc(sizeof(*b), GFP_KERNEL);
459 if (b) {
460 INIT_LIST_HEAD(&b->node);
461 INIT_LIST_HEAD(&b->children);
462 INIT_LIST_HEAD(&b->devices);
463 INIT_LIST_HEAD(&b->slots);
464 INIT_LIST_HEAD(&b->resources);
465 b->max_bus_speed = PCI_SPEED_UNKNOWN;
466 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
467 }
468 return b;
469 }
470
471 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
472 {
473 struct pci_host_bridge *bridge;
474
475 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
476 if (bridge) {
477 INIT_LIST_HEAD(&bridge->windows);
478 bridge->bus = b;
479 }
480
481 return bridge;
482 }
483
484 static unsigned char pcix_bus_speed[] = {
485 PCI_SPEED_UNKNOWN, /* 0 */
486 PCI_SPEED_66MHz_PCIX, /* 1 */
487 PCI_SPEED_100MHz_PCIX, /* 2 */
488 PCI_SPEED_133MHz_PCIX, /* 3 */
489 PCI_SPEED_UNKNOWN, /* 4 */
490 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
491 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
492 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
493 PCI_SPEED_UNKNOWN, /* 8 */
494 PCI_SPEED_66MHz_PCIX_266, /* 9 */
495 PCI_SPEED_100MHz_PCIX_266, /* A */
496 PCI_SPEED_133MHz_PCIX_266, /* B */
497 PCI_SPEED_UNKNOWN, /* C */
498 PCI_SPEED_66MHz_PCIX_533, /* D */
499 PCI_SPEED_100MHz_PCIX_533, /* E */
500 PCI_SPEED_133MHz_PCIX_533 /* F */
501 };
502
503 static unsigned char pcie_link_speed[] = {
504 PCI_SPEED_UNKNOWN, /* 0 */
505 PCIE_SPEED_2_5GT, /* 1 */
506 PCIE_SPEED_5_0GT, /* 2 */
507 PCIE_SPEED_8_0GT, /* 3 */
508 PCI_SPEED_UNKNOWN, /* 4 */
509 PCI_SPEED_UNKNOWN, /* 5 */
510 PCI_SPEED_UNKNOWN, /* 6 */
511 PCI_SPEED_UNKNOWN, /* 7 */
512 PCI_SPEED_UNKNOWN, /* 8 */
513 PCI_SPEED_UNKNOWN, /* 9 */
514 PCI_SPEED_UNKNOWN, /* A */
515 PCI_SPEED_UNKNOWN, /* B */
516 PCI_SPEED_UNKNOWN, /* C */
517 PCI_SPEED_UNKNOWN, /* D */
518 PCI_SPEED_UNKNOWN, /* E */
519 PCI_SPEED_UNKNOWN /* F */
520 };
521
522 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
523 {
524 bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
525 }
526 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
527
528 static unsigned char agp_speeds[] = {
529 AGP_UNKNOWN,
530 AGP_1X,
531 AGP_2X,
532 AGP_4X,
533 AGP_8X
534 };
535
536 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
537 {
538 int index = 0;
539
540 if (agpstat & 4)
541 index = 3;
542 else if (agpstat & 2)
543 index = 2;
544 else if (agpstat & 1)
545 index = 1;
546 else
547 goto out;
548
549 if (agp3) {
550 index += 2;
551 if (index == 5)
552 index = 0;
553 }
554
555 out:
556 return agp_speeds[index];
557 }
558
559
560 static void pci_set_bus_speed(struct pci_bus *bus)
561 {
562 struct pci_dev *bridge = bus->self;
563 int pos;
564
565 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
566 if (!pos)
567 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
568 if (pos) {
569 u32 agpstat, agpcmd;
570
571 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
572 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
573
574 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
575 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
576 }
577
578 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
579 if (pos) {
580 u16 status;
581 enum pci_bus_speed max;
582
583 pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
584 &status);
585
586 if (status & PCI_X_SSTATUS_533MHZ) {
587 max = PCI_SPEED_133MHz_PCIX_533;
588 } else if (status & PCI_X_SSTATUS_266MHZ) {
589 max = PCI_SPEED_133MHz_PCIX_266;
590 } else if (status & PCI_X_SSTATUS_133MHZ) {
591 if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
592 max = PCI_SPEED_133MHz_PCIX_ECC;
593 } else {
594 max = PCI_SPEED_133MHz_PCIX;
595 }
596 } else {
597 max = PCI_SPEED_66MHz_PCIX;
598 }
599
600 bus->max_bus_speed = max;
601 bus->cur_bus_speed = pcix_bus_speed[
602 (status & PCI_X_SSTATUS_FREQ) >> 6];
603
604 return;
605 }
606
607 pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
608 if (pos) {
609 u32 linkcap;
610 u16 linksta;
611
612 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
613 bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
614
615 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
616 pcie_update_link_speed(bus, linksta);
617 }
618 }
619
620
621 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
622 struct pci_dev *bridge, int busnr)
623 {
624 struct pci_bus *child;
625 int i;
626 int ret;
627
628 /*
629 * Allocate a new bus, and inherit stuff from the parent..
630 */
631 child = pci_alloc_bus();
632 if (!child)
633 return NULL;
634
635 child->parent = parent;
636 child->ops = parent->ops;
637 child->sysdata = parent->sysdata;
638 child->bus_flags = parent->bus_flags;
639
640 /* initialize some portions of the bus device, but don't register it
641 * now as the parent is not properly set up yet.
642 */
643 child->dev.class = &pcibus_class;
644 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
645
646 /*
647 * Set up the primary, secondary and subordinate
648 * bus numbers.
649 */
650 child->number = child->busn_res.start = busnr;
651 child->primary = parent->busn_res.start;
652 child->busn_res.end = 0xff;
653
654 if (!bridge) {
655 child->dev.parent = parent->bridge;
656 goto add_dev;
657 }
658
659 child->self = bridge;
660 child->bridge = get_device(&bridge->dev);
661 child->dev.parent = child->bridge;
662 pci_set_bus_of_node(child);
663 pci_set_bus_speed(child);
664
665 /* Set up default resource pointers and names.. */
666 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
667 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
668 child->resource[i]->name = child->name;
669 }
670 bridge->subordinate = child;
671
672 add_dev:
673 ret = device_register(&child->dev);
674 WARN_ON(ret < 0);
675
676 /* Create legacy_io and legacy_mem files for this bus */
677 pci_create_legacy_files(child);
678
679 return child;
680 }
681
682 struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
683 {
684 struct pci_bus *child;
685
686 child = pci_alloc_child_bus(parent, dev, busnr);
687 if (child) {
688 down_write(&pci_bus_sem);
689 list_add_tail(&child->node, &parent->children);
690 up_write(&pci_bus_sem);
691 }
692 return child;
693 }
694
695 static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
696 {
697 struct pci_bus *parent = child->parent;
698
699 /* Attempts to fix that up are really dangerous unless
700 we're going to re-assign all bus numbers. */
701 if (!pcibios_assign_all_busses())
702 return;
703
704 while (parent->parent && parent->busn_res.end < max) {
705 parent->busn_res.end = max;
706 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
707 parent = parent->parent;
708 }
709 }
710
711 /*
712 * If it's a bridge, configure it and scan the bus behind it.
713 * For CardBus bridges, we don't scan behind as the devices will
714 * be handled by the bridge driver itself.
715 *
716 * We need to process bridges in two passes -- first we scan those
717 * already configured by the BIOS and after we are done with all of
718 * them, we proceed to assigning numbers to the remaining buses in
719 * order to avoid overlaps between old and new bus numbers.
720 */
721 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
722 {
723 struct pci_bus *child;
724 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
725 u32 buses, i, j = 0;
726 u16 bctl;
727 u8 primary, secondary, subordinate;
728 int broken = 0;
729
730 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
731 primary = buses & 0xFF;
732 secondary = (buses >> 8) & 0xFF;
733 subordinate = (buses >> 16) & 0xFF;
734
735 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
736 secondary, subordinate, pass);
737
738 if (!primary && (primary != bus->number) && secondary && subordinate) {
739 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
740 primary = bus->number;
741 }
742
743 /* Check if setup is sensible at all */
744 if (!pass &&
745 (primary != bus->number || secondary <= bus->number ||
746 secondary > subordinate)) {
747 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
748 secondary, subordinate);
749 broken = 1;
750 }
751
752 /* Disable Master-Abort Mode during probing to avoid bus errors
753 being reported on some architectures */
754 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
755 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
756 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
757
758 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
759 !is_cardbus && !broken) {
760 unsigned int cmax;
761 /*
762 * Bus already configured by firmware, process it in the first
763 * pass and just note the configuration.
764 */
765 if (pass)
766 goto out;
767
768 /*
769 * If we already got to this bus through a different bridge,
770 * don't re-add it. This can happen with the i450NX chipset.
771 *
772 * However, we continue to descend down the hierarchy and
773 * scan remaining child buses.
774 */
775 child = pci_find_bus(pci_domain_nr(bus), secondary);
776 if (!child) {
777 child = pci_add_new_bus(bus, dev, secondary);
778 if (!child)
779 goto out;
780 child->primary = primary;
781 pci_bus_insert_busn_res(child, secondary, subordinate);
782 child->bridge_ctl = bctl;
783 }
784
785 cmax = pci_scan_child_bus(child);
786 if (cmax > max)
787 max = cmax;
788 if (child->busn_res.end > max)
789 max = child->busn_res.end;
790 } else {
791 /*
792 * We need to assign a number to this bus which we always
793 * do in the second pass.
794 */
795 if (!pass) {
796 if (pcibios_assign_all_busses() || broken)
797 /* Temporarily disable forwarding of the
798 configuration cycles on all bridges in
799 this bus segment to avoid possible
800 conflicts in the second pass between two
801 bridges programmed with overlapping
802 bus ranges. */
803 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
804 buses & ~0xffffff);
805 goto out;
806 }
807
808 /* Clear errors */
809 pci_write_config_word(dev, PCI_STATUS, 0xffff);
810
811 /* Prevent assigning a bus number that already exists.
812 * This can happen when a bridge is hot-plugged, so in
813 * this case we only re-scan this bus. */
814 child = pci_find_bus(pci_domain_nr(bus), max+1);
815 if (!child) {
816 child = pci_add_new_bus(bus, dev, ++max);
817 if (!child)
818 goto out;
819 pci_bus_insert_busn_res(child, max, 0xff);
820 }
821 buses = (buses & 0xff000000)
822 | ((unsigned int)(child->primary) << 0)
823 | ((unsigned int)(child->busn_res.start) << 8)
824 | ((unsigned int)(child->busn_res.end) << 16);
825
826 /*
827 * yenta.c forces a secondary latency timer of 176.
828 * Copy that behaviour here.
829 */
830 if (is_cardbus) {
831 buses &= ~0xff000000;
832 buses |= CARDBUS_LATENCY_TIMER << 24;
833 }
834
835 /*
836 * We need to blast all three values with a single write.
837 */
838 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
839
840 if (!is_cardbus) {
841 child->bridge_ctl = bctl;
842 /*
843 * Adjust subordinate busnr in parent buses.
844 * We do this before scanning for children because
845 * some devices may not be detected if the bios
846 * was lazy.
847 */
848 pci_fixup_parent_subordinate_busnr(child, max);
849 /* Now we can scan all subordinate buses... */
850 max = pci_scan_child_bus(child);
851 /*
852 * now fix it up again since we have found
853 * the real value of max.
854 */
855 pci_fixup_parent_subordinate_busnr(child, max);
856 } else {
857 /*
858 * For CardBus bridges, we leave 4 bus numbers
859 * as cards with a PCI-to-PCI bridge can be
860 * inserted later.
861 */
862 for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
863 struct pci_bus *parent = bus;
864 if (pci_find_bus(pci_domain_nr(bus),
865 max+i+1))
866 break;
867 while (parent->parent) {
868 if ((!pcibios_assign_all_busses()) &&
869 (parent->busn_res.end > max) &&
870 (parent->busn_res.end <= max+i)) {
871 j = 1;
872 }
873 parent = parent->parent;
874 }
875 if (j) {
876 /*
877 * Often, there are two cardbus bridges
878 * -- try to leave one valid bus number
879 * for each one.
880 */
881 i /= 2;
882 break;
883 }
884 }
885 max += i;
886 pci_fixup_parent_subordinate_busnr(child, max);
887 }
888 /*
889 * Set the subordinate bus number to its real value.
890 */
891 pci_bus_update_busn_res_end(child, max);
892 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
893 }
894
895 sprintf(child->name,
896 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
897 pci_domain_nr(bus), child->number);
898
899 /* Has only triggered on CardBus, fixup is in yenta_socket */
900 while (bus->parent) {
901 if ((child->busn_res.end > bus->busn_res.end) ||
902 (child->number > bus->busn_res.end) ||
903 (child->number < bus->number) ||
904 (child->busn_res.end < bus->number)) {
905 dev_info(&child->dev, "%pR %s "
906 "hidden behind%s bridge %s %pR\n",
907 &child->busn_res,
908 (bus->number > child->busn_res.end &&
909 bus->busn_res.end < child->number) ?
910 "wholly" : "partially",
911 bus->self->transparent ? " transparent" : "",
912 dev_name(&bus->dev),
913 &bus->busn_res);
914 }
915 bus = bus->parent;
916 }
917
918 out:
919 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
920
921 return max;
922 }
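/*
 * Register layout note (illustrative): the PCI_PRIMARY_BUS dword written
 * above packs, from the least significant byte upwards, the primary bus,
 * secondary bus, subordinate bus and secondary latency timer.  For a
 * bridge on primary bus 0x00 given secondary bus 0x02 and subordinate
 * bus 0x05, the programmed value is 0xXX050200, with the top byte being
 * the latency timer (forced to 176 for CardBus above).
 */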
923
924 /*
925 * Read interrupt line and base address registers.
926 * The architecture-dependent code can tweak these, of course.
927 */
928 static void pci_read_irq(struct pci_dev *dev)
929 {
930 unsigned char irq;
931
932 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
933 dev->pin = irq;
934 if (irq)
935 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
936 dev->irq = irq;
937 }
938
939 void set_pcie_port_type(struct pci_dev *pdev)
940 {
941 int pos;
942 u16 reg16;
943
944 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
945 if (!pos)
946 return;
947 pdev->is_pcie = 1;
948 pdev->pcie_cap = pos;
949 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
950 pdev->pcie_flags_reg = reg16;
951 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
952 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
953 }
954
955 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
956 {
957 u32 reg32;
958
959 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
960 if (reg32 & PCI_EXP_SLTCAP_HPC)
961 pdev->is_hotplug_bridge = 1;
962 }
963
964 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
965
966 /**
967 * pci_setup_device - fill in class and map information of a device
968 * @dev: the device structure to fill
969 *
970 * Initialize the device structure with information about the device's
971  * vendor, class, memory and I/O space addresses, IRQ lines, etc.
972  * Called at initialisation of the PCI subsystem and by CardBus services.
973  * Returns 0 on success and a negative value for an unknown device type
974  * (i.e. not a normal device, a bridge or a CardBus bridge).
975 */
976 int pci_setup_device(struct pci_dev *dev)
977 {
978 u32 class;
979 u8 hdr_type;
980 struct pci_slot *slot;
981 int pos = 0;
982 struct pci_bus_region region;
983 struct resource *res;
984
985 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
986 return -EIO;
987
988 dev->sysdata = dev->bus->sysdata;
989 dev->dev.parent = dev->bus->bridge;
990 dev->dev.bus = &pci_bus_type;
991 dev->dev.type = &pci_dev_type;
992 dev->hdr_type = hdr_type & 0x7f;
993 dev->multifunction = !!(hdr_type & 0x80);
994 dev->error_state = pci_channel_io_normal;
995 set_pcie_port_type(dev);
996
997 list_for_each_entry(slot, &dev->bus->slots, list)
998 if (PCI_SLOT(dev->devfn) == slot->number)
999 dev->slot = slot;
1000
1001 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1002 set this higher, assuming the system even supports it. */
1003 dev->dma_mask = 0xffffffff;
1004
1005 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1006 dev->bus->number, PCI_SLOT(dev->devfn),
1007 PCI_FUNC(dev->devfn));
1008
1009 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1010 dev->revision = class & 0xff;
1011 dev->class = class >> 8; /* upper 3 bytes */
1012
1013 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1014 dev->vendor, dev->device, dev->hdr_type, dev->class);
1015
1016 /* need to have dev->class ready */
1017 dev->cfg_size = pci_cfg_space_size(dev);
1018
1019 /* "Unknown power state" */
1020 dev->current_state = PCI_UNKNOWN;
1021
1022 /* Early fixups, before probing the BARs */
1023 pci_fixup_device(pci_fixup_early, dev);
1024 /* device class may be changed after fixup */
1025 class = dev->class >> 8;
1026
1027 switch (dev->hdr_type) { /* header type */
1028 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1029 if (class == PCI_CLASS_BRIDGE_PCI)
1030 goto bad;
1031 pci_read_irq(dev);
1032 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1033 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1034 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1035
1036 /*
1037 * Do the ugly legacy mode stuff here rather than broken chip
1038 * quirk code. Legacy mode ATA controllers have fixed
1039 * addresses. These are not always echoed in BAR0-3, and
1040 * BAR0-3 in a few cases contain junk!
1041 */
1042 if (class == PCI_CLASS_STORAGE_IDE) {
1043 u8 progif;
1044 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1045 if ((progif & 1) == 0) {
1046 region.start = 0x1F0;
1047 region.end = 0x1F7;
1048 res = &dev->resource[0];
1049 res->flags = LEGACY_IO_RESOURCE;
1050 pcibios_bus_to_resource(dev, res, &region);
1051 region.start = 0x3F6;
1052 region.end = 0x3F6;
1053 res = &dev->resource[1];
1054 res->flags = LEGACY_IO_RESOURCE;
1055 pcibios_bus_to_resource(dev, res, &region);
1056 }
1057 if ((progif & 4) == 0) {
1058 region.start = 0x170;
1059 region.end = 0x177;
1060 res = &dev->resource[2];
1061 res->flags = LEGACY_IO_RESOURCE;
1062 pcibios_bus_to_resource(dev, res, &region);
1063 region.start = 0x376;
1064 region.end = 0x376;
1065 res = &dev->resource[3];
1066 res->flags = LEGACY_IO_RESOURCE;
1067 pcibios_bus_to_resource(dev, res, &region);
1068 }
1069 }
1070 break;
1071
1072 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1073 if (class != PCI_CLASS_BRIDGE_PCI)
1074 goto bad;
1075 /* The PCI-to-PCI bridge spec requires that a subtractive
1076 decode (i.e. transparent) bridge have a programming
1077 interface code of 0x01. */
1078 pci_read_irq(dev);
1079 dev->transparent = ((dev->class & 0xff) == 1);
1080 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1081 set_pcie_hotplug_bridge(dev);
1082 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1083 if (pos) {
1084 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1085 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1086 }
1087 break;
1088
1089 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1090 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1091 goto bad;
1092 pci_read_irq(dev);
1093 pci_read_bases(dev, 1, 0);
1094 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1095 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1096 break;
1097
1098 default: /* unknown header */
1099 dev_err(&dev->dev, "unknown header type %02x, "
1100 "ignoring device\n", dev->hdr_type);
1101 return -EIO;
1102
1103 bad:
1104 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
1105 "type %02x)\n", dev->class, dev->hdr_type);
1106 dev->class = PCI_CLASS_NOT_DEFINED;
1107 }
1108
1109 /* We found a fine healthy device, go go go... */
1110 return 0;
1111 }
1112
1113 static void pci_release_capabilities(struct pci_dev *dev)
1114 {
1115 pci_vpd_release(dev);
1116 pci_iov_release(dev);
1117 pci_free_cap_save_buffers(dev);
1118 }
1119
1120 /**
1121 * pci_release_dev - free a pci device structure when all users of it are finished.
1122 * @dev: device that's been disconnected
1123 *
1124 * Will be called only by the device core when all users of this pci device are
1125 * done.
1126 */
1127 static void pci_release_dev(struct device *dev)
1128 {
1129 struct pci_dev *pci_dev;
1130
1131 pci_dev = to_pci_dev(dev);
1132 pci_release_capabilities(pci_dev);
1133 pci_release_of_node(pci_dev);
1134 kfree(pci_dev);
1135 }
1136
1137 /**
1138 * pci_cfg_space_size - get the configuration space size of the PCI device.
1139 * @dev: PCI device
1140 *
1141 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1142 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1143 * access it. Maybe we don't have a way to generate extended config space
1144 * accesses, or the device is behind a reverse Express bridge. So we try
1145 * reading the dword at 0x100 which must either be 0 or a valid extended
1146 * capability header.
1147 */
1148 int pci_cfg_space_size_ext(struct pci_dev *dev)
1149 {
1150 u32 status;
1151 int pos = PCI_CFG_SPACE_SIZE;
1152
1153 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1154 goto fail;
1155 if (status == 0xffffffff)
1156 goto fail;
1157
1158 return PCI_CFG_SPACE_EXP_SIZE;
1159
1160 fail:
1161 return PCI_CFG_SPACE_SIZE;
1162 }
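/*
 * Illustrative read: on a device with extended config space, the dword
 * at offset 0x100 is the first extended capability header, e.g.
 * 0x14010001 for an AER capability (ID 0x0001, version 1, next pointer
 * 0x140).  A value of 0xffffffff means the access never reached the
 * device, so we fall back to the 256-byte space.
 */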
1163
1164 int pci_cfg_space_size(struct pci_dev *dev)
1165 {
1166 int pos;
1167 u32 status;
1168 u16 class;
1169
1170 class = dev->class >> 8;
1171 if (class == PCI_CLASS_BRIDGE_HOST)
1172 return pci_cfg_space_size_ext(dev);
1173
1174 if (!pci_is_pcie(dev)) {
1175 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1176 if (!pos)
1177 goto fail;
1178
1179 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1180 if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1181 goto fail;
1182 }
1183
1184 return pci_cfg_space_size_ext(dev);
1185
1186 fail:
1187 return PCI_CFG_SPACE_SIZE;
1188 }
1189
1190 static void pci_release_bus_bridge_dev(struct device *dev)
1191 {
1192 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
1193
1194 if (bridge->release_fn)
1195 bridge->release_fn(bridge);
1196
1197 pci_free_resource_list(&bridge->windows);
1198
1199 kfree(bridge);
1200 }
1201
1202 struct pci_dev *alloc_pci_dev(void)
1203 {
1204 struct pci_dev *dev;
1205
1206 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1207 if (!dev)
1208 return NULL;
1209
1210 INIT_LIST_HEAD(&dev->bus_list);
1211
1212 return dev;
1213 }
1214 EXPORT_SYMBOL(alloc_pci_dev);
1215
1216 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1217 int crs_timeout)
1218 {
1219 int delay = 1;
1220
1221 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1222 return false;
1223
1224 /* some broken boards return 0 or ~0 if a slot is empty: */
1225 if (*l == 0xffffffff || *l == 0x00000000 ||
1226 *l == 0x0000ffff || *l == 0xffff0000)
1227 return false;
1228
1229 /* Configuration request Retry Status */
1230 while (*l == 0xffff0001) {
1231 if (!crs_timeout)
1232 return false;
1233
1234 msleep(delay);
1235 delay *= 2;
1236 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1237 return false;
1238 /* Card hasn't responded in 60 seconds? Must be stuck. */
1239 if (delay > crs_timeout) {
1240 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
1241 "responding\n", pci_domain_nr(bus),
1242 bus->number, PCI_SLOT(devfn),
1243 PCI_FUNC(devfn));
1244 return false;
1245 }
1246 }
1247
1248 return true;
1249 }
1250 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
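/*
 * Timing sketch (illustrative): 0xffff0001 is the value a root complex
 * with CRS software visibility synthesizes while the device is still
 * initializing (vendor ID 0x0001, device ID 0xffff).  The loop above
 * re-reads with exponentially growing delays of 1, 2, 4, ... ms, so with
 * the 60*1000 ms timeout passed in by pci_scan_device() a silent device
 * is given up on after roughly a minute of polling.
 */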
1251
1252 /*
1253 * Read the config data for a PCI device, sanity-check it
1254 * and fill in the dev structure...
1255 */
1256 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1257 {
1258 struct pci_dev *dev;
1259 u32 l;
1260
1261 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1262 return NULL;
1263
1264 dev = alloc_pci_dev();
1265 if (!dev)
1266 return NULL;
1267
1268 dev->bus = bus;
1269 dev->devfn = devfn;
1270 dev->vendor = l & 0xffff;
1271 dev->device = (l >> 16) & 0xffff;
1272
1273 pci_set_of_node(dev);
1274
1275 if (pci_setup_device(dev)) {
1276 kfree(dev);
1277 return NULL;
1278 }
1279
1280 return dev;
1281 }
1282
1283 static void pci_init_capabilities(struct pci_dev *dev)
1284 {
1285 /* MSI/MSI-X list */
1286 pci_msi_init_pci_dev(dev);
1287
1288 /* Buffers for saving PCIe and PCI-X capabilities */
1289 pci_allocate_cap_save_buffers(dev);
1290
1291 /* Power Management */
1292 pci_pm_init(dev);
1293
1294 /* Vital Product Data */
1295 pci_vpd_pci22_init(dev);
1296
1297 /* Alternative Routing-ID Forwarding */
1298 pci_configure_ari(dev);
1299
1300 /* Single Root I/O Virtualization */
1301 pci_iov_init(dev);
1302
1303 /* Enable ACS P2P upstream forwarding */
1304 pci_enable_acs(dev);
1305 }
1306
1307 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1308 {
1309 int ret;
1310
1311 device_initialize(&dev->dev);
1312 dev->dev.release = pci_release_dev;
1313
1314 set_dev_node(&dev->dev, pcibus_to_node(bus));
1315 dev->dev.dma_mask = &dev->dma_mask;
1316 dev->dev.dma_parms = &dev->dma_parms;
1317 dev->dev.coherent_dma_mask = 0xffffffffull;
1318
1319 pci_set_dma_max_seg_size(dev, 65536);
1320 pci_set_dma_seg_boundary(dev, 0xffffffff);
1321
1322 /* Fix up broken headers */
1323 pci_fixup_device(pci_fixup_header, dev);
1324
1325 /* moved out from quirk header fixup code */
1326 pci_reassigndev_resource_alignment(dev);
1327
1328 /* Clear the state_saved flag. */
1329 dev->state_saved = false;
1330
1331 /* Initialize various capabilities */
1332 pci_init_capabilities(dev);
1333
1334 /*
1335 * Add the device to our list of discovered devices
1336 * and the bus list for fixup functions, etc.
1337 */
1338 down_write(&pci_bus_sem);
1339 list_add_tail(&dev->bus_list, &bus->devices);
1340 up_write(&pci_bus_sem);
1341
1342 pci_fixup_device(pci_fixup_final, dev);
1343 ret = pcibios_add_device(dev);
1344 WARN_ON(ret < 0);
1345
1346 /* Notifier could use PCI capabilities */
1347 dev->match_driver = false;
1348 ret = device_add(&dev->dev);
1349 WARN_ON(ret < 0);
1350
1351 pci_proc_attach_device(dev);
1352 }
1353
1354 struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1355 {
1356 struct pci_dev *dev;
1357
1358 dev = pci_get_slot(bus, devfn);
1359 if (dev) {
1360 pci_dev_put(dev);
1361 return dev;
1362 }
1363
1364 dev = pci_scan_device(bus, devfn);
1365 if (!dev)
1366 return NULL;
1367
1368 pci_device_add(dev, bus);
1369
1370 return dev;
1371 }
1372 EXPORT_SYMBOL(pci_scan_single_device);
1373
1374 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1375 {
1376 int pos;
1377 u16 cap = 0;
1378 unsigned next_fn;
1379
1380 if (pci_ari_enabled(bus)) {
1381 if (!dev)
1382 return 0;
1383 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1384 if (!pos)
1385 return 0;
1386
1387 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1388 next_fn = PCI_ARI_CAP_NFN(cap);
1389 if (next_fn <= fn)
1390 return 0; /* protect against malformed list */
1391
1392 return next_fn;
1393 }
1394
1395 /* dev may be NULL for non-contiguous multifunction devices */
1396 if (!dev || dev->multifunction)
1397 return (fn + 1) % 8;
1398
1399 return 0;
1400 }
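/*
 * Illustrative contrast: on a conventional slot the functions of a
 * multifunction device are just 0..7, so next_fn() walks
 * 0 -> 1 -> ... -> 7 and stops when it wraps to 0.  With ARI the
 * function number is 8 bits wide and sparse; each function's ARI
 * capability names the next implemented function, e.g. 0 -> 4 -> 64 ->
 * 0 (stop), which is why the linked-list style walk above is needed.
 */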
1401
1402 static int only_one_child(struct pci_bus *bus)
1403 {
1404 struct pci_dev *parent = bus->self;
1405
1406 if (!parent || !pci_is_pcie(parent))
1407 return 0;
1408 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1409 return 1;
1410 if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1411 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1412 return 1;
1413 return 0;
1414 }
1415
1416 /**
1417 * pci_scan_slot - scan a PCI slot on a bus for devices.
1418 * @bus: PCI bus to scan
1419  * @devfn: slot number to scan (must have zero function)
1420 *
1421 * Scan a PCI slot on the specified PCI bus for devices, adding
1422 * discovered devices to the @bus->devices list. New devices
1423 * will not have is_added set.
1424 *
1425 * Returns the number of new devices found.
1426 */
1427 int pci_scan_slot(struct pci_bus *bus, int devfn)
1428 {
1429 unsigned fn, nr = 0;
1430 struct pci_dev *dev;
1431
1432 if (only_one_child(bus) && (devfn > 0))
1433 return 0; /* Already scanned the entire slot */
1434
1435 dev = pci_scan_single_device(bus, devfn);
1436 if (!dev)
1437 return 0;
1438 if (!dev->is_added)
1439 nr++;
1440
1441 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1442 dev = pci_scan_single_device(bus, devfn + fn);
1443 if (dev) {
1444 if (!dev->is_added)
1445 nr++;
1446 dev->multifunction = 1;
1447 }
1448 }
1449
1450 /* only one slot has pcie device */
1451 if (bus->self && nr)
1452 pcie_aspm_init_link_state(bus->self);
1453
1454 return nr;
1455 }
1456
1457 static int pcie_find_smpss(struct pci_dev *dev, void *data)
1458 {
1459 u8 *smpss = data;
1460
1461 if (!pci_is_pcie(dev))
1462 return 0;
1463
1464 /* For PCIE hotplug enabled slots not connected directly to a
1465 * PCI-E root port, there can be problems when hotplugging
1466 * devices. This is due to the possibility of hotplugging a
1467  * device into the fabric with a smaller MPS than the devices
1468 * currently running have configured. Modifying the MPS on the
1469 * running devices could cause a fatal bus error due to an
1470 * incoming frame being larger than the newly configured MPS.
1471 * To work around this, the MPS for the entire fabric must be
1472 * set to the minimum size. Any devices hotplugged into this
1473 * fabric will have the minimum MPS set. If the PCI hotplug
1474  * slot is directly connected to the root port and there are no
1475 * other devices on the fabric (which seems to be the most
1476 * common case), then this is not an issue and MPS discovery
1477 * will occur as normal.
1478 */
1479 if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
1480 (dev->bus->self &&
1481 pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT)))
1482 *smpss = 0;
1483
1484 if (*smpss > dev->pcie_mpss)
1485 *smpss = dev->pcie_mpss;
1486
1487 return 0;
1488 }
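/*
 * Encoding reminder (illustrative): pcie_mpss holds the raw 3-bit "Max
 * Payload Size Supported" field from DEVCAP, so the supported payload in
 * bytes is 128 << pcie_mpss:
 *
 *	0 -> 128, 1 -> 256, 2 -> 512, 3 -> 1024, 4 -> 2048, 5 -> 4096
 *
 * A *smpss of 0 therefore forces the whole fabric down to 128-byte
 * payloads, the only size every possible hotplugged device can accept.
 */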
1489
1490 static void pcie_write_mps(struct pci_dev *dev, int mps)
1491 {
1492 int rc;
1493
1494 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1495 mps = 128 << dev->pcie_mpss;
1496
1497 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1498 dev->bus->self)
1499 /* For "Performance", the assumption is made that
1500 * downstream communication will never be larger than
1501 * the MRRS. So, the MPS only needs to be configured
1502 * for the upstream communication. This being the case,
1503 * walk from the top down and set the MPS of the child
1504 * to that of the parent bus.
1505 *
1506 * Configure the device MPS with the smaller of the
1507 * device MPSS or the bridge MPS (which is assumed to be
1508 * properly configured at this point to the largest
1509 * allowable MPS based on its parent bus).
1510 */
1511 mps = min(mps, pcie_get_mps(dev->bus->self));
1512 }
1513
1514 rc = pcie_set_mps(dev, mps);
1515 if (rc)
1516 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1517 }
1518
1519 static void pcie_write_mrrs(struct pci_dev *dev)
1520 {
1521 int rc, mrrs;
1522
1523 /* In the "safe" case, do not configure the MRRS. There appear to be
1524 * issues with setting MRRS to 0 on a number of devices.
1525 */
1526 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1527 return;
1528
1529 /* For Max performance, the MRRS must be set to the largest supported
1530 * value. However, it cannot be configured larger than the MPS the
1531 * device or the bus can support. This should already be properly
1532 * configured by a prior call to pcie_write_mps.
1533 */
1534 mrrs = pcie_get_mps(dev);
1535
1536 /* MRRS is an R/W register. Invalid values can be written, but a
1537 * subsequent read will verify if the value is acceptable or not.
1538 * If the MRRS value provided is not acceptable (e.g., too large),
1539 * shrink the value until it is acceptable to the HW.
1540 */
1541 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1542 rc = pcie_set_readrq(dev, mrrs);
1543 if (!rc)
1544 break;
1545
1546 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1547 mrrs /= 2;
1548 }
1549
1550 if (mrrs < 128)
1551 dev_err(&dev->dev, "MRRS was unable to be configured with a "
1552 "safe value. If problems are experienced, try running "
1553 "with pci=pcie_bus_safe.\n");
1554 }
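/*
 * Example run (illustrative): if the device MPS was programmed to 512
 * bytes but the hardware only accepts an MRRS of 256, the loop above
 * first tries 512; when pcie_set_readrq() rejects it, the value is
 * halved to 256 and retried, stopping as soon as a value is accepted or
 * already matches the current read request size.  Only if it falls below
 * 128 is the configuration reported as unusable.
 */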
1555
1556 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1557 {
1558 int mps, orig_mps;
1559
1560 if (!pci_is_pcie(dev))
1561 return 0;
1562
1563 mps = 128 << *(u8 *)data;
1564 orig_mps = pcie_get_mps(dev);
1565
1566 pcie_write_mps(dev, mps);
1567 pcie_write_mrrs(dev);
1568
1569 dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
1570 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
1571 orig_mps, pcie_get_readrq(dev));
1572
1573 return 0;
1574 }
1575
1576 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1577 * parents then children fashion. If this changes, then this code will not
1578 * work as designed.
1579 */
1580 void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1581 {
1582 u8 smpss;
1583
1584 if (!pci_is_pcie(bus->self))
1585 return;
1586
1587 if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
1588 return;
1589
1590 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
1591  * to be aware of the MPS of the destination. To work around this,
1592 * simply force the MPS of the entire system to the smallest possible.
1593 */
1594 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1595 smpss = 0;
1596
1597 if (pcie_bus_config == PCIE_BUS_SAFE) {
1598 smpss = mpss;
1599
1600 pcie_find_smpss(bus->self, &smpss);
1601 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1602 }
1603
1604 pcie_bus_configure_set(bus->self, &smpss);
1605 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1606 }
1607 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
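/*
 * Typical call site (sketch; the exact caller is arch-specific): after a
 * child bus has been scanned, code along the lines of
 *
 *	if (child->self)
 *		pcie_bus_configure_settings(child, child->self->pcie_mpss);
 *
 * hands the bridge's supported MPS down, and pci_walk_bus()'s top-down
 * ordering then propagates the chosen value to every device below.
 */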
1608
1609 unsigned int pci_scan_child_bus(struct pci_bus *bus)
1610 {
1611 unsigned int devfn, pass, max = bus->busn_res.start;
1612 struct pci_dev *dev;
1613
1614 dev_dbg(&bus->dev, "scanning bus\n");
1615
1616 /* Go find them, Rover! */
1617 for (devfn = 0; devfn < 0x100; devfn += 8)
1618 pci_scan_slot(bus, devfn);
1619
1620 /* Reserve buses for SR-IOV capability. */
1621 max += pci_iov_bus_range(bus);
1622
1623 /*
1624 * After performing arch-dependent fixup of the bus, look behind
1625 * all PCI-to-PCI bridges on this bus.
1626 */
1627 if (!bus->is_added) {
1628 dev_dbg(&bus->dev, "fixups for bus\n");
1629 pcibios_fixup_bus(bus);
1630 if (pci_is_root_bus(bus))
1631 bus->is_added = 1;
1632 }
1633
1634 for (pass=0; pass < 2; pass++)
1635 list_for_each_entry(dev, &bus->devices, bus_list) {
1636 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1637 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1638 max = pci_scan_bridge(bus, dev, max, pass);
1639 }
1640
1641 /*
1642 * We've scanned the bus and so we know all about what's on
1643 * the other side of any bridges that may be on this bus plus
1644 * any devices.
1645 *
1646 * Return how far we've got finding sub-buses.
1647 */
1648 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1649 return max;
1650 }
1651
1652 /**
1653 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1654 * @bridge: Host bridge to set up.
1655 *
1656 * Default empty implementation. Replace with an architecture-specific setup
1657 * routine, if necessary.
1658 */
1659 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1660 {
1661 return 0;
1662 }
1663
1664 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1665 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1666 {
1667 int error;
1668 struct pci_host_bridge *bridge;
1669 struct pci_bus *b, *b2;
1670 struct pci_host_bridge_window *window, *n;
1671 struct resource *res;
1672 resource_size_t offset;
1673 char bus_addr[64];
1674 char *fmt;
1675
1676 b = pci_alloc_bus();
1677 if (!b)
1678 return NULL;
1679
1680 b->sysdata = sysdata;
1681 b->ops = ops;
1682 b->number = b->busn_res.start = bus;
1683 b2 = pci_find_bus(pci_domain_nr(b), bus);
1684 if (b2) {
1685 /* If we already got to this bus through a different bridge, ignore it */
1686 dev_dbg(&b2->dev, "bus already known\n");
1687 goto err_out;
1688 }
1689
1690 bridge = pci_alloc_host_bridge(b);
1691 if (!bridge)
1692 goto err_out;
1693
1694 bridge->dev.parent = parent;
1695 bridge->dev.release = pci_release_bus_bridge_dev;
1696 dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1697 error = pcibios_root_bridge_prepare(bridge);
1698 if (error)
1699 goto bridge_dev_reg_err;
1700
1701 error = device_register(&bridge->dev);
1702 if (error)
1703 goto bridge_dev_reg_err;
1704 b->bridge = get_device(&bridge->dev);
1705 device_enable_async_suspend(b->bridge);
1706 pci_set_bus_of_node(b);
1707
1708 if (!parent)
1709 set_dev_node(b->bridge, pcibus_to_node(b));
1710
1711 b->dev.class = &pcibus_class;
1712 b->dev.parent = b->bridge;
1713 dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1714 error = device_register(&b->dev);
1715 if (error)
1716 goto class_dev_reg_err;
1717
1718 /* Create legacy_io and legacy_mem files for this bus */
1719 pci_create_legacy_files(b);
1720
1721 if (parent)
1722 dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
1723 else
1724 printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1725
1726 /* Add initial resources to the bus */
1727 list_for_each_entry_safe(window, n, resources, list) {
1728 list_move_tail(&window->list, &bridge->windows);
1729 res = window->res;
1730 offset = window->offset;
1731 if (res->flags & IORESOURCE_BUS)
1732 pci_bus_insert_busn_res(b, bus, res->end);
1733 else
1734 pci_bus_add_resource(b, res, 0);
1735 if (offset) {
1736 if (resource_type(res) == IORESOURCE_IO)
1737 fmt = " (bus address [%#06llx-%#06llx])";
1738 else
1739 fmt = " (bus address [%#010llx-%#010llx])";
1740 snprintf(bus_addr, sizeof(bus_addr), fmt,
1741 (unsigned long long) (res->start - offset),
1742 (unsigned long long) (res->end - offset));
1743 } else
1744 bus_addr[0] = '\0';
1745 dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
1746 }
1747
1748 down_write(&pci_bus_sem);
1749 list_add_tail(&b->node, &pci_root_buses);
1750 up_write(&pci_bus_sem);
1751
1752 return b;
1753
1754 class_dev_reg_err:
1755 put_device(&bridge->dev);
1756 device_unregister(&bridge->dev);
1757 bridge_dev_reg_err:
1758 kfree(bridge);
1759 err_out:
1760 kfree(b);
1761 return NULL;
1762 }
1763
1764 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
1765 {
1766 struct resource *res = &b->busn_res;
1767 struct resource *parent_res, *conflict;
1768
1769 res->start = bus;
1770 res->end = bus_max;
1771 res->flags = IORESOURCE_BUS;
1772
1773 if (!pci_is_root_bus(b))
1774 parent_res = &b->parent->busn_res;
1775 else {
1776 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
1777 res->flags |= IORESOURCE_PCI_FIXED;
1778 }
1779
1780 conflict = insert_resource_conflict(parent_res, res);
1781
1782 if (conflict)
1783 dev_printk(KERN_DEBUG, &b->dev,
1784 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
1785 res, pci_is_root_bus(b) ? "domain " : "",
1786 parent_res, conflict->name, conflict);
1787
1788 return conflict == NULL;
1789 }
1790
1791 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
1792 {
1793 struct resource *res = &b->busn_res;
1794 struct resource old_res = *res;
1795 resource_size_t size;
1796 int ret;
1797
1798 if (res->start > bus_max)
1799 return -EINVAL;
1800
1801 size = bus_max - res->start + 1;
1802 ret = adjust_resource(res, res->start, size);
1803 dev_printk(KERN_DEBUG, &b->dev,
1804 "busn_res: %pR end %s updated to %02x\n",
1805 &old_res, ret ? "can not be" : "is", bus_max);
1806
1807 if (!ret && !res->parent)
1808 pci_bus_insert_busn_res(b, res->start, res->end);
1809
1810 return ret;
1811 }
1812
1813 void pci_bus_release_busn_res(struct pci_bus *b)
1814 {
1815 struct resource *res = &b->busn_res;
1816 int ret;
1817
1818 if (!res->flags || !res->parent)
1819 return;
1820
1821 ret = release_resource(res);
1822 dev_printk(KERN_DEBUG, &b->dev,
1823 "busn_res: %pR %s released\n",
1824 res, ret ? "can not be" : "is");
1825 }
1826
1827 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1828 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1829 {
1830 struct pci_host_bridge_window *window;
1831 bool found = false;
1832 struct pci_bus *b;
1833 int max;
1834
1835 list_for_each_entry(window, resources, list)
1836 if (window->res->flags & IORESOURCE_BUS) {
1837 found = true;
1838 break;
1839 }
1840
1841 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
1842 if (!b)
1843 return NULL;
1844
1845 if (!found) {
1846 dev_info(&b->dev,
1847 "No busn resource found for root bus, will use [bus %02x-ff]\n",
1848 bus);
1849 pci_bus_insert_busn_res(b, bus, 255);
1850 }
1851
1852 max = pci_scan_child_bus(b);
1853
1854 if (!found)
1855 pci_bus_update_busn_res_end(b, max);
1856
1857 pci_bus_add_devices(b);
1858 return b;
1859 }
1860 EXPORT_SYMBOL(pci_scan_root_bus);
1861
1862 /* Deprecated; use pci_scan_root_bus() instead */
1863 struct pci_bus *pci_scan_bus_parented(struct device *parent,
1864 int bus, struct pci_ops *ops, void *sysdata)
1865 {
1866 LIST_HEAD(resources);
1867 struct pci_bus *b;
1868
1869 pci_add_resource(&resources, &ioport_resource);
1870 pci_add_resource(&resources, &iomem_resource);
1871 pci_add_resource(&resources, &busn_resource);
1872 b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
1873 if (b)
1874 pci_scan_child_bus(b);
1875 else
1876 pci_free_resource_list(&resources);
1877 return b;
1878 }
1879 EXPORT_SYMBOL(pci_scan_bus_parented);
1880
1881 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
1882 void *sysdata)
1883 {
1884 LIST_HEAD(resources);
1885 struct pci_bus *b;
1886
1887 pci_add_resource(&resources, &ioport_resource);
1888 pci_add_resource(&resources, &iomem_resource);
1889 pci_add_resource(&resources, &busn_resource);
1890 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1891 if (b) {
1892 pci_scan_child_bus(b);
1893 pci_bus_add_devices(b);
1894 } else {
1895 pci_free_resource_list(&resources);
1896 }
1897 return b;
1898 }
1899 EXPORT_SYMBOL(pci_scan_bus);
1900
1901 /**
1902 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1903 * @bridge: PCI bridge for the bus to scan
1904 *
1905 * Scan a PCI bus and child buses for new devices, add them,
1906 * and enable them, resizing bridge mmio/io resource if necessary
1907 * and possible. The caller must ensure the child devices are already
1908 * removed for resizing to occur.
1909 *
1910  * Returns the max number of subordinate buses discovered.
1911 */
1912 unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
1913 {
1914 unsigned int max;
1915 struct pci_bus *bus = bridge->subordinate;
1916
1917 max = pci_scan_child_bus(bus);
1918
1919 pci_assign_unassigned_bridge_resources(bridge);
1920
1921 pci_bus_add_devices(bus);
1922
1923 return max;
1924 }
1925
1926 /**
1927 * pci_rescan_bus - scan a PCI bus for devices.
1928 * @bus: PCI bus to scan
1929 *
1930  * Scan a PCI bus and child buses for new devices, add them,
1931  * and enable them.
1932  *
1933  * Returns the max number of subordinate buses discovered.
1934 */
1935 unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
1936 {
1937 unsigned int max;
1938
1939 max = pci_scan_child_bus(bus);
1940 pci_assign_unassigned_bus_resources(bus);
1941 pci_enable_bridges(bus);
1942 pci_bus_add_devices(bus);
1943
1944 return max;
1945 }
1946 EXPORT_SYMBOL_GPL(pci_rescan_bus);
1947
1948 EXPORT_SYMBOL(pci_add_new_bus);
1949 EXPORT_SYMBOL(pci_scan_slot);
1950 EXPORT_SYMBOL(pci_scan_bridge);
1951 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1952
1953 static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
1954 {
1955 const struct pci_dev *a = to_pci_dev(d_a);
1956 const struct pci_dev *b = to_pci_dev(d_b);
1957
1958 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
1959 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
1960
1961 if (a->bus->number < b->bus->number) return -1;
1962 else if (a->bus->number > b->bus->number) return 1;
1963
1964 if (a->devfn < b->devfn) return -1;
1965 else if (a->devfn > b->devfn) return 1;
1966
1967 return 0;
1968 }
1969
1970 void __init pci_sort_breadthfirst(void)
1971 {
1972 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
1973 }