/*
 * Copyright (C) 2001 Allan Trautman, IBM Corporation
 *
 * iSeries specific routines for PCI.
 *
 * Based on code from pci.c and iSeries_pci.c 32bit
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/mf.h>
#include <asm/iseries/iommu.h>

#include <asm/ppc-pci.h>

#include "irq.h"
#include "pci.h"
#include "call_pci.h"
#define PCI_RETRY_MAX	3
static int limit_pci_retries = 1;	/* When set, panic once PCI_RETRY_MAX retries are exceeded. */

/*
 * Table defines
 * Each entry maps 4 MB; 1024 entries cover the 4 GB I/O address space.
 */
#define IOMM_TABLE_MAX_ENTRIES	1024
#define IOMM_TABLE_ENTRY_SIZE	0x0000000000400000UL
#define BASE_IO_MEMORY		0xE000000000000000UL

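/*
 * current_iomm_table_entry indexes the next free 4 MB window;
 * max_io_memory tracks the end of the I/O range mapped so far.
 */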
static unsigned long max_io_memory = BASE_IO_MEMORY;
static long current_iomm_table_entry;

/*
 * Lookup tables: iomm_table maps an I/O window index to the owning
 * device node, and iobar_table records which BAR of that device the
 * window belongs to.
 */
static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
static u8 iobar_table[IOMM_TABLE_MAX_ENTRIES];

static const char pci_io_text[] = "iSeries PCI I/O";
static DEFINE_SPINLOCK(iomm_table_lock);

/*
 * iomm_table_allocate_entry
 *
 * Adds a pci_dev entry to the address translation table.
 *
 * - Allocates the number of table entries required, based on the BAR
 *   size.
 * - Allocates starting at BASE_IO_MEMORY and increases.
 * - The size is rounded up to a multiple of the entry size.
 * - current_iomm_table_entry is incremented to track the last entry.
 * - Builds the resource entry for the allocated BAR.
 */
static void __init iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
{
	struct resource *bar_res = &dev->resource[bar_num];
	long bar_size = pci_resource_len(dev, bar_num);

	/*
	 * Nothing to allocate for this BAR, skip it.
	 */
	if (bar_size == 0)
		return;
	/*
	 * Set Resource values.
	 */
	spin_lock(&iomm_table_lock);
	bar_res->name = pci_io_text;
	bar_res->start = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	bar_res->end = bar_res->start + bar_size - 1;
	/*
	 * Allocate the number of table entries needed for BAR.
	 */
	while (bar_size > 0) {
		iomm_table[current_iomm_table_entry] = dev->sysdata;
		iobar_table[current_iomm_table_entry] = bar_num;
		bar_size -= IOMM_TABLE_ENTRY_SIZE;
		++current_iomm_table_entry;
	}
	max_io_memory = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	spin_unlock(&iomm_table_lock);
}

/*
 * allocate_device_bars
 *
 * - Allocates an I/O window for every BAR of the pci_dev and updates
 *   the resources with the allocated values; BARs with zero length are
 *   skipped.  iomm_table_allocate_entry is called to allocate the
 *   table entries for each BAR.
 * - Loops through the BAR resources (0 - 5) plus the ROM resource (6).
 */
static void __init allocate_device_bars(struct pci_dev *dev)
{
	int bar_num;

	for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num)
		iomm_table_allocate_entry(dev, bar_num);
}

/*
 * Log error information to system console.
 * Filter out the device not there errors.
 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
 */
static void pci_log_error(char *error, int bus, int subbus,
		int agent, int hv_res)
{
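	/* HV return code 0x0302 means the device is not there; don't log it. */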
	if (hv_res == 0x0302)
		return;
	printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X\n",
			error, bus, subbus, agent, hv_res);
}

/*
 * Look down the chain to find the matching device node.
 */
static struct device_node *find_device_node(int bus, int devfn)
{
	struct device_node *node;

	for (node = NULL; (node = of_find_all_nodes(node)); ) {
		struct pci_dn *pdn = PCI_DN(node);

		if (pdn && (bus == pdn->busno) && (devfn == pdn->devfn))
			return node;
	}
	return NULL;
}

/*
 * iSeries_pci_final_fixup(void)
 *
 * Pairs each pci_dev with its device tree node, connects the bus unit
 * to its interrupt through the hypervisor, allocates I/O windows for
 * its BARs and initialises its IOMMU table.
 */
void __init iSeries_pci_final_fixup(void)
{
	struct pci_dev *pdev = NULL;
	struct device_node *node;
	int num_dev = 0;

	/* Fix up the device node and pci_dev relationship */
	mf_display_src(0xC9000100);

	printk("pcibios_final_fixup\n");
	for_each_pci_dev(pdev) {
		node = find_device_node(pdev->bus->number, pdev->devfn);
		printk("pci dev %p (%x.%x), node %p\n", pdev,
		       pdev->bus->number, pdev->devfn, node);

		if (node != NULL) {
			struct pci_dn *pdn = PCI_DN(node);
			const u32 *agent;

			agent = of_get_property(node, "linux,agent-id", NULL);
			if ((pdn != NULL) && (agent != NULL)) {
				u8 irq = iSeries_allocate_IRQ(pdn->busno, 0,
						pdn->bussubno);
				int err;

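				/*
				 * Connect the bus unit to the IRQ we just
				 * allocated and record the IRQ in the
				 * device's PCI_INTERRUPT_LINE register.
				 */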
				err = HvCallXm_connectBusUnit(pdn->busno, pdn->bussubno,
						*agent, irq);
				if (err)
					pci_log_error("Connect Bus Unit",
						pdn->busno, pdn->bussubno, *agent, err);
				else {
					err = HvCallPci_configStore8(pdn->busno, pdn->bussubno,
							*agent,
							PCI_INTERRUPT_LINE,
							irq);
					if (err)
						pci_log_error("PciCfgStore Irq Failed!",
							pdn->busno, pdn->bussubno, *agent, err);
				}
				if (!err)
					pdev->irq = irq;
			}

			++num_dev;
			pdev->sysdata = (void *)node;
			PCI_DN(node)->pcidev = pdev;
			allocate_device_bars(pdev);
			iSeries_Device_Information(pdev, num_dev);
			iommu_devnode_init_iSeries(pdev, node);
		} else
			printk("PCI: Device Tree not found for 0x%016lX\n",
					(unsigned long)pdev);
	}
	iSeries_activate_IRQs();
	mf_display_src(0xC9000200);
}

#if 0
/*
 * Returns the device node for the passed pci_dev
 * Sanity Check Node PciDev to passed pci_dev
 * If none is found, returns a NULL which the client must handle.
 */
static struct device_node *get_device_node(struct pci_dev *pdev)
{
	struct device_node *node;

	node = pdev->sysdata;
	if (node == NULL || PCI_DN(node)->pcidev != pdev)
		node = find_device_node(pdev->bus->number, pdev->devfn);
	return node;
}
#endif

/*
 * Config space read and write functions.
 * For now at least, we look for the device node for the bus and devfn
 * that we are asked to access.  It may be possible to translate the devfn
 * to a subbus and deviceid more directly.
 */
static u64 hv_cfg_read_func[4] = {
	HvCallPciConfigLoad8, HvCallPciConfigLoad16,
	HvCallPciConfigLoad32, HvCallPciConfigLoad32
};

static u64 hv_cfg_write_func[4] = {
	HvCallPciConfigStore8, HvCallPciConfigStore16,
	HvCallPciConfigStore32, HvCallPciConfigStore32
};
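
/*
 * Both tables are indexed by (size - 1) & 3, so 1, 2 and 4 byte
 * config accesses map to the 8, 16 and 32 bit HV calls respectively.
 */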

/*
 * Read PCI config space
 */
static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 *val)
{
	struct device_node *node = find_device_node(bus->number, devfn);
	u64 fn;
	struct HvCallPci_LoadReturn ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255) {
		*val = ~0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	fn = hv_cfg_read_func[(size - 1) & 3];
	HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);

	if (ret.rc != 0) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;	/* or something */
	}

	*val = ret.value;
	return 0;
}

/*
 * Write PCI config space
 */

static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 val)
{
	struct device_node *node = find_device_node(bus->number, devfn);
	u64 fn;
	u64 ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	fn = hv_cfg_write_func[(size - 1) & 3];
	ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);

	if (ret != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return 0;
}

static struct pci_ops iSeries_pci_ops = {
	.read = iSeries_pci_read_config,
	.write = iSeries_pci_write_config
};

/*
 * Check Return Code
 * -> On Failure, print and log information.
 *    Increment the retry count; if it exceeds the maximum, panic the
 *    partition.
 *
 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
 * PCI: Device 23.90 ReadL Retry( 1)
 * PCI: Device 23.90 ReadL Retry Successful(1)
 */
static int check_return_code(char *type, struct device_node *dn,
		int *retry, u64 ret)
{
	if (ret != 0) {
		struct pci_dn *pdn = PCI_DN(dn);

		(*retry)++;
		printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
				type, pdn->busno, pdn->devfn,
				*retry, (int)ret);
		/*
		 * If the retry count has been exceeded, panic the system.
		 */
		if (((*retry) > PCI_RETRY_MAX) &&
				(limit_pci_retries > 0)) {
			mf_display_src(0xB6000103);
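			/* Zero panic_timeout so the partition does not reboot automatically. */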
			panic_timeout = 0;
			panic("PCI: Hardware I/O Error, SRC B6000103, "
					"Automatic Reboot Disabled.\n");
		}
		return -1;	/* Retry */
	}
	return 0;
}

/*
 * Translate the I/O address into a device node, bar, and bar offset.
 * Note: Make sure the passed variables end up on the stack to avoid
 * the exposure of being device global.
 */
static inline struct device_node *xlate_iomm_address(
		const volatile void __iomem *addr,
		u64 *dsaptr, u64 *bar_offset)
{
	unsigned long orig_addr;
	unsigned long base_addr;
	unsigned long ind;
	struct device_node *dn;

	orig_addr = (unsigned long __force)addr;
	if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory))
		return NULL;
	base_addr = orig_addr - BASE_IO_MEMORY;
	ind = base_addr / IOMM_TABLE_ENTRY_SIZE;
	dn = iomm_table[ind];

	if (dn != NULL) {
		int barnum = iobar_table[ind];
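		/* Encode the BAR number into bits 24 and up of the device's DSA. */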
		*dsaptr = iseries_ds_addr(dn) | (barnum << 24);
		*bar_offset = base_addr % IOMM_TABLE_ENTRY_SIZE;
	} else
		panic("PCI: Invalid PCI IO address detected!\n");
	return dn;
}

/*
 * Read MM I/O Instructions for the iSeries
 * On an invalid MM I/O address, all ones are returned; otherwise, data
 * is returned in Big Endian format.
 */
static u8 iSeries_read_byte(const volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset);

	if (dn == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

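		/* Print at most ten of these warnings per minute. */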
		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_read_byte: invalid access at IO address %p\n",
			       addr);
		return 0xff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, bar_offset, 0);
	} while (check_return_code("RDB", dn, &retry, ret.rc) != 0);

	return ret.value;
}

static u16 iSeries_read_word(const volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset);

	if (dn == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_read_word: invalid access at IO address %p\n",
			       addr);
		return 0xffff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
				bar_offset, 0);
	} while (check_return_code("RDW", dn, &retry, ret.rc) != 0);

	return ret.value;
}

static u32 iSeries_read_long(const volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset);

	if (dn == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_read_long: invalid access at IO address %p\n",
			       addr);
		return 0xffffffff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
				bar_offset, 0);
	} while (check_return_code("RDL", dn, &retry, ret.rc) != 0);

	return ret.value;
}

/*
 * Write MM I/O Instructions for the iSeries
 */
static void iSeries_write_byte(u8 data, volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset);

	if (dn == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_write_byte: invalid access at IO address %p\n", addr);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore8, dsa, bar_offset, data, 0);
	} while (check_return_code("WWB", dn, &retry, rc) != 0);
}

static void iSeries_write_word(u16 data, volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset);

	if (dn == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_write_word: invalid access at IO address %p\n",
			       addr);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore16, dsa, bar_offset, data, 0);
	} while (check_return_code("WWW", dn, &retry, rc) != 0);
}

static void iSeries_write_long(u32 data, volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset);

	if (dn == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_write_long: invalid access at IO address %p\n",
			       addr);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore32, dsa, bar_offset, data, 0);
	} while (check_return_code("WWL", dn, &retry, rc) != 0);
}

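/*
 * Accessors wired into ppc_pci_io below.  The plain readw/readl and
 * writew/writel variants byte-swap through the le16/le32 helpers; the
 * _be variants pass the data through unchanged.
 */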
static u8 iseries_readb(const volatile void __iomem *addr)
{
	return iSeries_read_byte(addr);
}

static u16 iseries_readw(const volatile void __iomem *addr)
{
	return le16_to_cpu(iSeries_read_word(addr));
}

static u32 iseries_readl(const volatile void __iomem *addr)
{
	return le32_to_cpu(iSeries_read_long(addr));
}

static u16 iseries_readw_be(const volatile void __iomem *addr)
{
	return iSeries_read_word(addr);
}

static u32 iseries_readl_be(const volatile void __iomem *addr)
{
	return iSeries_read_long(addr);
}

static void iseries_writeb(u8 data, volatile void __iomem *addr)
{
	iSeries_write_byte(data, addr);
}

static void iseries_writew(u16 data, volatile void __iomem *addr)
{
	iSeries_write_word(cpu_to_le16(data), addr);
}

static void iseries_writel(u32 data, volatile void __iomem *addr)
{
	iSeries_write_long(cpu_to_le32(data), addr);
}

static void iseries_writew_be(u16 data, volatile void __iomem *addr)
{
	iSeries_write_word(data, addr);
}

static void iseries_writel_be(u32 data, volatile void __iomem *addr)
{
	iSeries_write_long(data, addr);
}

static void iseries_readsb(const volatile void __iomem *addr, void *buf,
			   unsigned long count)
{
	u8 *dst = buf;
	while (count-- > 0)
		*(dst++) = iSeries_read_byte(addr);
}

static void iseries_readsw(const volatile void __iomem *addr, void *buf,
			   unsigned long count)
{
	u16 *dst = buf;
	while (count-- > 0)
		*(dst++) = iSeries_read_word(addr);
}

static void iseries_readsl(const volatile void __iomem *addr, void *buf,
			   unsigned long count)
{
	u32 *dst = buf;
	while (count-- > 0)
		*(dst++) = iSeries_read_long(addr);
}

static void iseries_writesb(volatile void __iomem *addr, const void *buf,
			    unsigned long count)
{
	const u8 *src = buf;
	while (count-- > 0)
		iSeries_write_byte(*(src++), addr);
}

static void iseries_writesw(volatile void __iomem *addr, const void *buf,
			    unsigned long count)
{
	const u16 *src = buf;
	while (count-- > 0)
		iSeries_write_word(*(src++), addr);
}

static void iseries_writesl(volatile void __iomem *addr, const void *buf,
			    unsigned long count)
{
	const u32 *src = buf;
	while (count-- > 0)
		iSeries_write_long(*(src++), addr);
}

static void iseries_memset_io(volatile void __iomem *addr, int c,
			      unsigned long n)
{
	volatile char __iomem *d = addr;

	while (n-- > 0)
		iSeries_write_byte(c, d++);
}

static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
				  unsigned long n)
{
	char *d = dest;
	const volatile char __iomem *s = src;

	while (n-- > 0)
		*d++ = iSeries_read_byte(s++);
}

static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
				unsigned long n)
{
	const char *s = src;
	volatile char __iomem *d = dest;

	while (n-- > 0)
		iSeries_write_byte(*s++, d++);
}

/* We only set the MMIO ops.  The default PIO ops will default to the
 * MMIO ops + pci_io_base, which is 0 on iSeries as expected, so both
 * should work.
 *
 * Note that we don't implement the readq/writeq versions as I don't
 * know of an HV call for doing so.  Thus, the default operation will
 * be used instead, which will fault as the value returned by iSeries
 * for MMIO addresses always hits a non-mapped area.  This is as good
 * as the BUG() we used to have there.
 */
static struct ppc_pci_io __initdata iseries_pci_io = {
	.readb = iseries_readb,
	.readw = iseries_readw,
	.readl = iseries_readl,
	.readw_be = iseries_readw_be,
	.readl_be = iseries_readl_be,
	.writeb = iseries_writeb,
	.writew = iseries_writew,
	.writel = iseries_writel,
	.writew_be = iseries_writew_be,
	.writel_be = iseries_writel_be,
	.readsb = iseries_readsb,
	.readsw = iseries_readsw,
	.readsl = iseries_readsl,
	.writesb = iseries_writesb,
	.writesw = iseries_writesw,
	.writesl = iseries_writesl,
	.memset_io = iseries_memset_io,
	.memcpy_fromio = iseries_memcpy_fromio,
	.memcpy_toio = iseries_memcpy_toio,
};

/*
 * iSeries_pcibios_init
 *
 * Description:
 *   This function checks for all possible system PCI host bridges that connect
 *   PCI buses.  The system hypervisor is queried as to the guest partition
 *   ownership status.  A pci_controller is built for any bus which is partially
 *   owned or fully owned by this guest partition.
 */
void __init iSeries_pcibios_init(void)
{
	struct pci_controller *phb;
	struct device_node *root = of_find_node_by_path("/");
	struct device_node *node = NULL;

	/* Install IO hooks */
	ppc_pci_io = iseries_pci_io;

	/* iSeries has no IO space in the common sense, it needs to set
	 * the IO base to 0
	 */
	pci_io_base = 0;

	if (root == NULL) {
		printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
				"of device tree\n");
		return;
	}
	while ((node = of_get_next_child(root, node)) != NULL) {
		HvBusNumber bus;
		const u32 *busp;

		if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
			continue;

		busp = of_get_property(node, "bus-range", NULL);
		if (busp == NULL)
			continue;
		bus = *busp;
		printk("bus %d appears to exist\n", bus);
		phb = pcibios_alloc_controller(node);
		if (phb == NULL)
			continue;

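		/* Each iSeries PHB covers exactly one bus number. */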
		phb->pci_mem_offset = bus;
		phb->first_busno = bus;
		phb->last_busno = bus;
		phb->ops = &iSeries_pci_ops;
	}

	of_node_put(root);

	pci_devs_phb_init();
}