/* pci_fire.c: Sun4u platform PCI-E controller support.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/irq.h>

#include <asm/oplib.h>
#include <asm/prom.h>

#include "pci_impl.h"
#define fire_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define fire_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory")
static void pci_fire_scan_bus(struct pci_pbm_info *pbm)
{
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}
#define FIRE_IOMMU_CONTROL	0x40000UL
#define FIRE_IOMMU_TSBBASE	0x40008UL
#define FIRE_IOMMU_FLUSH	0x40100UL
#define FIRE_IOMMU_FLUSHINV	0x40108UL
static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	u32 vdma[2], dma_mask;
	u64 control;
	int tsbsize, err;

	/* No virtual-dma property on these guys, use largest size.  */
	vdma[0] = 0xc0000000; /* base */
	vdma[1] = 0x40000000; /* size */
	dma_mask = 0xffffffff;
	tsbsize = 128;

	/* Register addresses. */
	iommu->iommu_control  = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
	iommu->iommu_tsbbase  = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
	iommu->iommu_flush    = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
	iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;

	/* We use the main control/status register of FIRE as the write
	 * completion register.
	 */
	iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;

	/*
	 * Invalidate TLB Entries.
	 */
	fire_write(iommu->iommu_flushinv, ~(u64)0);

	err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
	if (err)
		return err;

	fire_write(iommu->iommu_tsbbase, __pa(iommu->page_table) | 0x7UL);

	control = fire_read(iommu->iommu_control);
	control |= (0x00000400 /* TSB cache snoop enable */ |
		    0x00000300 /* Cache mode */ |
		    0x00000002 /* Bypass enable */ |
		    0x00000001 /* Translation enable */);
	fire_write(iommu->iommu_control, control);

	return 0;
}
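/* Sanity check on the geometry above: tsbsize * 8 * 1024 = 1 MB of
 * 8-byte TSB entries, i.e. 128K entries.  Each entry maps one 8 KB IO
 * page, so the table covers exactly the 1 GB (0x40000000) virtual-DMA
 * window based at 0xc0000000.
 */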
#ifdef CONFIG_PCI_MSI
struct pci_msiq_entry {
	u64		word0;
#define MSIQ_WORD0_RESV			0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE		0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT	56
#define MSIQ_WORD0_LEN			0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT		46
#define MSIQ_WORD0_ADDR0		0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT		32
#define MSIQ_WORD0_RID			0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT		16
#define MSIQ_WORD0_DATA0		0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT		0

#define MSIQ_TYPE_MSG			0x6
#define MSIQ_TYPE_MSI32			0xb
#define MSIQ_TYPE_MSI64			0xf

	u64		word1;
#define MSIQ_WORD1_ADDR1		0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT		16
#define MSIQ_WORD1_DATA1		0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT		0

	u64		resv[6];
};
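/* Each event queue entry is 8 u64s (64 bytes), which is why the
 * prehandler below indexes pbm->msi_queues with an 8192-byte stride per
 * queue: room for 128 such entries.  Fields are extracted mask-then-shift
 * style, e.g.:
 *
 *	msi = (ep->word0 & MSIQ_WORD0_DATA0) >> MSIQ_WORD0_DATA0_SHIFT;
 */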
/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG	0x010000UL
#define  EVENT_QUEUE_BASE_ADDR_ALL_ONES	0xfffc000000000000UL

#define EVENT_QUEUE_CONTROL_SET(EQ)	(0x011000UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_CONTROL_SET_OFLOW	0x0200000000000000UL
#define  EVENT_QUEUE_CONTROL_SET_EN	0x0000100000000000UL

#define EVENT_QUEUE_CONTROL_CLEAR(EQ)	(0x011200UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_CONTROL_CLEAR_OF	0x0200000000000000UL
#define  EVENT_QUEUE_CONTROL_CLEAR_E2I	0x0000800000000000UL
#define  EVENT_QUEUE_CONTROL_CLEAR_DIS	0x0000100000000000UL

#define EVENT_QUEUE_STATE(EQ)		(0x011400UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_STATE_MASK		0x0000000000000007UL
#define  EVENT_QUEUE_STATE_IDLE		0x0000000000000001UL
#define  EVENT_QUEUE_STATE_ACTIVE	0x0000000000000002UL
#define  EVENT_QUEUE_STATE_ERROR	0x0000000000000004UL

#define EVENT_QUEUE_TAIL(EQ)		(0x011600UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_TAIL_OFLOW		0x0200000000000000UL
#define  EVENT_QUEUE_TAIL_VAL		0x000000000000007fUL

#define EVENT_QUEUE_HEAD(EQ)		(0x011800UL + (EQ) * 0x8UL)
#define  EVENT_QUEUE_HEAD_VAL		0x000000000000007fUL

#define MSI_MAP(MSI)			(0x020000UL + (MSI) * 0x8UL)
#define  MSI_MAP_VALID			0x8000000000000000UL
#define  MSI_MAP_EQWR_N			0x4000000000000000UL
#define  MSI_MAP_EQNUM			0x000000000000003fUL

#define MSI_CLEAR(MSI)			(0x028000UL + (MSI) * 0x8UL)
#define  MSI_CLEAR_EQWR_N		0x4000000000000000UL

#define IMONDO_DATA0			0x02C000UL
#define  IMONDO_DATA0_DATA		0xffffffffffffffc0UL

#define IMONDO_DATA1			0x02C008UL
#define  IMONDO_DATA1_DATA		0xffffffffffffffffUL

#define MSI_32BIT_ADDR			0x034000UL
#define  MSI_32BIT_ADDR_VAL		0x00000000ffff0000UL

#define MSI_64BIT_ADDR			0x034008UL
#define  MSI_64BIT_ADDR_VAL		0xffffffffffff0000UL
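/* Rough flow of an MSI through this hardware, as used by the code below:
 * a device write to the 32-bit or 64-bit match address latches the MSI
 * number, MSI_MAP(msi) routes it to the event queue selected by
 * MSI_MAP_EQNUM, the chip DMAs a pci_msiq_entry into that queue, and
 * software acks by writing MSI_CLEAR_EQWR_N to MSI_CLEAR(msi) and
 * advancing EVENT_QUEUE_HEAD.
 */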
/* For now this just runs as a pre-handler for the real interrupt handler.
 * So we just walk through the queue and ACK all the entries, update the
 * head pointer, and return.
 *
 * In the longer term it would be nice to do something more integrated
 * wherein we can pass in some of this MSI info to the drivers.  This
 * would be most useful for PCIe fabric error messages, although we could
 * invoke those directly from the loop here in order to pass the info around.
 */
static void pci_msi_prehandler(unsigned int ino, void *data1, void *data2)
{
	unsigned long msiqid, orig_head, head, type_fmt, type;
	struct pci_pbm_info *pbm = data1;
	struct pci_msiq_entry *base, *ep;

	msiqid = (unsigned long) data2;

	head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));

	orig_head = head;
	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
	ep = &base[head];
	while ((ep->word0 & MSIQ_WORD0_FMT_TYPE) != 0) {
		unsigned long msi_num;

		type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
			    MSIQ_WORD0_FMT_TYPE_SHIFT);
		type = (type_fmt >> 3);
		if (unlikely(type != MSIQ_TYPE_MSI32 &&
			     type != MSIQ_TYPE_MSI64))
			goto bad_type;

		msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
			   MSIQ_WORD0_DATA0_SHIFT);

		fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
			   MSI_CLEAR_EQWR_N);

		/* Clear the entry.  */
		ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;

		/* Go to next entry in ring.  */
		head++;
		if (head >= pbm->msiq_ent_count)
			head = 0;
		ep = &base[head];
	}

	if (likely(head != orig_head)) {
		/* ACK entries by updating head pointer.  */
		fire_write(pbm->pbm_regs +
			   EVENT_QUEUE_HEAD(msiqid),
			   head);
	}
	return;

bad_type:
	printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
	return;
}
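/* The walk above terminates at the first entry whose FMT_TYPE field is
 * still zero: hardware sets a nonzero type when it writes an entry, and
 * we clear that field again after servicing it, so a zero type marks the
 * first slot not yet filled.
 */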
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
	unsigned long size, bits_per_ulong;

	bits_per_ulong = sizeof(unsigned long) * 8;
	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
	size /= 8;
	BUG_ON(size % sizeof(unsigned long));

	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_bitmap)
		return -ENOMEM;

	return 0;
}
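/* The size computation rounds msi_num up to a whole number of
 * unsigned-long-sized bitmap words: round up to a multiple of
 * BITS_PER_LONG bits, then divide by 8 to get bytes.  For example,
 * 100 MSIs on a 64-bit kernel -> 128 bits -> 16 bytes.
 */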
static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msi_bitmap);
	pbm->msi_bitmap = NULL;
}
static int msi_queue_alloc(struct pci_pbm_info *pbm)
{
	unsigned long pages, order, i;

	order = get_order(512 * 1024);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	fire_write(pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG,
		   (EVENT_QUEUE_BASE_ADDR_ALL_ONES |
		    __pa(pbm->msi_queues)));

	/* Mondo data0 carries the portid, aligned to the DATA field
	 * (bits 63:6) of IMONDO_DATA0.
	 */
	fire_write(pbm->pbm_regs + IMONDO_DATA0,
		   pbm->portid << 6);
	fire_write(pbm->pbm_regs + IMONDO_DATA1, 0);

	fire_write(pbm->pbm_regs + MSI_32BIT_ADDR,
		   pbm->msi32_start);
	fire_write(pbm->pbm_regs + MSI_64BIT_ADDR,
		   pbm->msi64_start);

	for (i = 0; i < pbm->msiq_num; i++) {
		fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(i), 0);
		fire_write(pbm->pbm_regs + EVENT_QUEUE_TAIL(i), 0);
	}

	return 0;
}
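/* 512 KB of queue memory at an 8 KB stride per queue gives room for up
 * to 64 event queues; the number actually initialized (pbm->msiq_num)
 * comes from the "#msi-eqs" OBP property read in pci_fire_msi_init().
 */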
static int alloc_msi(struct pci_pbm_info *pbm)
{
	int i;

	for (i = 0; i < pbm->msi_num; i++) {
		if (!test_and_set_bit(i, pbm->msi_bitmap))
			return i + pbm->msi_first;
	}

	return -ENOENT;
}
static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
	msi_num -= pbm->msi_first;
	clear_bit(msi_num, pbm->msi_bitmap);
}
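/* alloc_msi()/free_msi() hand out global MSI numbers: bitmap index plus
 * pbm->msi_first (the first MSI number this PBM owns, taken from the
 * "msi-ranges" property), so callers always see hardware MSI numbers
 * while the bitmap itself stays zero-based.
 */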
static int pci_setup_msi_irq(unsigned int *virt_irq_p,
			     struct pci_dev *pdev,
			     struct msi_desc *entry)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned long devino, msiqid, cregs, imap_off;
	struct msi_msg msg;
	int msi_num, err;
	u64 val;

	msi_num = alloc_msi(pbm);
	if (msi_num < 0)
		return msi_num;

	cregs = (unsigned long) pbm->pbm_regs;

	err = sun4u_build_msi(pbm->portid, virt_irq_p,
			      pbm->msiq_first_devino,
			      (pbm->msiq_first_devino +
			       pbm->msiq_num),
			      cregs + 0x001000UL,
			      cregs + 0x001400UL);
	if (err < 0)
		goto out_err;
	devino = err;

	imap_off = 0x001000UL + (devino * 0x8UL);

	val = fire_read(pbm->pbm_regs + imap_off);
	val |= (1UL << 63) | (1UL << 6);
	fire_write(pbm->pbm_regs + imap_off, val);

	msiqid = ((devino - pbm->msiq_first_devino) +
		  pbm->msiq_first);

	fire_write(pbm->pbm_regs +
		   EVENT_QUEUE_CONTROL_SET(msiqid),
		   EVENT_QUEUE_CONTROL_SET_EN);

	val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));
	val &= ~(MSI_MAP_EQNUM);
	val |= msiqid;
	fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);

	fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
		   MSI_CLEAR_EQWR_N);

	val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));
	val |= MSI_MAP_VALID;
	fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);

	sparc64_set_msi(*virt_irq_p, msi_num);

	if (entry->msi_attrib.is_64) {
		msg.address_hi = pbm->msi64_start >> 32;
		msg.address_lo = pbm->msi64_start & 0xffffffff;
	} else {
		msg.address_hi = 0;
		msg.address_lo = pbm->msi32_start;
	}
	msg.data = msi_num;

	set_irq_msi(*virt_irq_p, entry);
	write_msi_msg(*virt_irq_p, &msg);

	irq_install_pre_handler(*virt_irq_p,
				pci_msi_prehandler,
				pbm, (void *) msiqid);

	return 0;

out_err:
	free_msi(pbm, msi_num);
	return err;
}
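/* Setup sequence in short: allocate an MSI number, build a sun4u IRQ on
 * one of this PBM's MSI event-queue devinos, enable that event queue,
 * point MSI_MAP at it, ack any stale state via MSI_CLEAR, mark the map
 * entry valid, and finally compose the MSI message the device will write
 * (32-bit or 64-bit match address, data = the MSI number).
 */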
static void pci_teardown_msi_irq(unsigned int virt_irq,
				 struct pci_dev *pdev)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned long msiqid, msi_num;
	u64 val;

	msi_num = sparc64_get_msi(virt_irq);

	val = fire_read(pbm->pbm_regs + MSI_MAP(msi_num));

	msiqid = (val & MSI_MAP_EQNUM);

	val &= ~MSI_MAP_VALID;
	fire_write(pbm->pbm_regs + MSI_MAP(msi_num), val);

	fire_write(pbm->pbm_regs + EVENT_QUEUE_CONTROL_CLEAR(msiqid),
		   EVENT_QUEUE_CONTROL_CLEAR_DIS);

	free_msi(pbm, msi_num);

	/* The sun4u_destroy_msi() will liberate the devino and thus the
	 * MSIQ allocation.
	 */
	sun4u_destroy_msi(virt_irq);
}
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
	const u32 *val;
	int len;

	val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
	if (!val || len != 4)
		goto no_msi;
	pbm->msiq_num = *val;
	if (pbm->msiq_num) {
		const struct msiq_prop {
			u32 first_msiq;
			u32 num_msiq;
			u32 first_devino;
		} *mqp;
		const struct msi_range_prop {
			u32 first_msi;
			u32 num_msi;
		} *mrng;
		const struct addr_range_prop {
			u32 msi32_high;
			u32 msi32_low;
			u32 msi32_len;
			u32 msi64_high;
			u32 msi64_low;
			u32 msi64_len;
		} *arng;

		val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
		if (!val || len != 4)
			goto no_msi;

		pbm->msiq_ent_count = *val;

		mqp = of_get_property(pbm->prom_node,
				      "msi-eq-to-devino", &len);
		if (!mqp)
			mqp = of_get_property(pbm->prom_node,
					      "msi-eq-devino", &len);
		if (!mqp || len != sizeof(struct msiq_prop))
			goto no_msi;

		pbm->msiq_first = mqp->first_msiq;
		pbm->msiq_first_devino = mqp->first_devino;

		val = of_get_property(pbm->prom_node, "#msi", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_num = *val;

		mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
		if (!mrng || len != sizeof(struct msi_range_prop))
			goto no_msi;
		pbm->msi_first = mrng->first_msi;

		val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_data_mask = *val;

		val = of_get_property(pbm->prom_node, "msix-data-width", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msix_data_width = *val;

		arng = of_get_property(pbm->prom_node, "msi-address-ranges",
				       &len);
		if (!arng || len != sizeof(struct addr_range_prop))
			goto no_msi;
		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
			(u64) arng->msi32_low;
		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
			(u64) arng->msi64_low;
		pbm->msi32_len = arng->msi32_len;
		pbm->msi64_len = arng->msi64_len;

		if (msi_bitmap_alloc(pbm))
			goto no_msi;

		if (msi_queue_alloc(pbm)) {
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
		       "devino[0x%x]\n",
		       pbm->name,
		       pbm->msiq_first, pbm->msiq_num,
		       pbm->msiq_ent_count,
		       pbm->msiq_first_devino);
		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
		       "width[%u]\n",
		       pbm->name,
		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
		       pbm->msix_data_width);
		printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
		       "addr64[0x%lx:0x%x]\n",
		       pbm->name,
		       pbm->msi32_start, pbm->msi32_len,
		       pbm->msi64_start, pbm->msi64_len);
		printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
		       pbm->name,
		       __pa(pbm->msi_queues));
	}
	pbm->setup_msi_irq = pci_setup_msi_irq;
	pbm->teardown_msi_irq = pci_teardown_msi_irq;

	return;

no_msi:
	pbm->msiq_num = 0;
	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}
#else /* CONFIG_PCI_MSI */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL	0x470010UL
#define  FIRE_PARITY_ENAB	0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL	0x471028UL
#define  FIRE_FATAL_RESET_SPARE	0x0000000004000000UL
#define  FIRE_FATAL_RESET_MB	0x0000000002000000UL
#define  FIRE_FATAL_RESET_CPE	0x0000000000008000UL
#define  FIRE_FATAL_RESET_APE	0x0000000000004000UL
#define  FIRE_FATAL_RESET_PIO	0x0000000000000040UL
#define  FIRE_FATAL_RESET_JW	0x0000000000000004UL
#define  FIRE_FATAL_RESET_JI	0x0000000000000002UL
#define  FIRE_FATAL_RESET_JR	0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE	0x471800UL

/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL		0x80000UL
#define  FIRE_TLU_CTRL_TIM	0x00000000da000000UL
#define  FIRE_TLU_CTRL_QDET	0x0000000000000100UL
#define  FIRE_TLU_CTRL_CFG	0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL	0x90008UL
#define FIRE_TLU_LINK_CTRL	0x90020UL
#define  FIRE_TLU_LINK_CTRL_CLK	0x0000000000000040UL
#define FIRE_LPU_RESET		0xe2008UL
#define FIRE_LPU_LLCFG		0xe2200UL
#define  FIRE_LPU_LLCFG_VC0	0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL	0xe2240UL
#define  FIRE_LPU_FCTRL_UCTRL_N	0x0000000000000002UL
#define  FIRE_LPU_FCTRL_UCTRL_P	0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP	0xe2430UL
#define FIRE_LPU_LTSSM_CFG2	0xe2788UL
#define FIRE_LPU_LTSSM_CFG3	0xe2790UL
#define FIRE_LPU_LTSSM_CFG4	0xe2798UL
#define FIRE_LPU_LTSSM_CFG5	0xe27a0UL
#define FIRE_DMC_IENAB		0x31800UL
#define FIRE_DMC_DBG_SEL_A	0x53000UL
#define FIRE_DMC_DBG_SEL_B	0x53008UL
#define FIRE_PEC_IENAB		0x51800UL
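/* The writes below touch the major Fire PCIe units named by the register
 * prefixes above: TLU (transaction layer unit) and LPU (link/physical
 * unit, including the LTSSM timeout configuration), plus the DMC and PEC
 * interrupt-enable and debug-select registers.
 */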
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
	u64 val;

	fire_write(pbm->controller_regs + FIRE_PARITY_CONTROL,
		   FIRE_PARITY_ENAB);

	fire_write(pbm->controller_regs + FIRE_FATAL_RESET_CTL,
		   (FIRE_FATAL_RESET_SPARE |
		    FIRE_FATAL_RESET_MB |
		    FIRE_FATAL_RESET_CPE |
		    FIRE_FATAL_RESET_APE |
		    FIRE_FATAL_RESET_PIO |
		    FIRE_FATAL_RESET_JW |
		    FIRE_FATAL_RESET_JI |
		    FIRE_FATAL_RESET_JR));

	fire_write(pbm->controller_regs + FIRE_CORE_INTR_ENABLE, ~(u64)0);

	val = fire_read(pbm->pbm_regs + FIRE_TLU_CTRL);
	val |= (FIRE_TLU_CTRL_TIM |
		FIRE_TLU_CTRL_QDET |
		FIRE_TLU_CTRL_CFG);
	fire_write(pbm->pbm_regs + FIRE_TLU_CTRL, val);
	fire_write(pbm->pbm_regs + FIRE_TLU_DEV_CTRL, 0);
	fire_write(pbm->pbm_regs + FIRE_TLU_LINK_CTRL,
		   FIRE_TLU_LINK_CTRL_CLK);

	fire_write(pbm->pbm_regs + FIRE_LPU_RESET, 0);
	fire_write(pbm->pbm_regs + FIRE_LPU_LLCFG,
		   FIRE_LPU_LLCFG_VC0);
	fire_write(pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL,
		   (FIRE_LPU_FCTRL_UCTRL_N |
		    FIRE_LPU_FCTRL_UCTRL_P));
	fire_write(pbm->pbm_regs + FIRE_LPU_TXL_FIFOP,
		   ((0xffff << 16) | (0x0000 << 0)));
	fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2, 3000000);
	fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3, 500000);
	fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4,
		   (2 << 16) | (140 << 8));
	fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5, 0);

	fire_write(pbm->pbm_regs + FIRE_DMC_IENAB, ~(u64)0);
	fire_write(pbm->pbm_regs + FIRE_DMC_DBG_SEL_A, 0);
	fire_write(pbm->pbm_regs + FIRE_DMC_DBG_SEL_B, 0);

	fire_write(pbm->pbm_regs + FIRE_PEC_IENAB, ~(u64)0);
}
static int pci_fire_pbm_init(struct pci_controller_info *p,
			     struct device_node *dp, u32 portid)
{
	const struct linux_prom64_registers *regs;
	struct pci_pbm_info *pbm;
	int err;

	if ((portid & 1) == 0)
		pbm = &p->pbm_A;
	else
		pbm = &p->pbm_B;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->scan_bus = pci_fire_scan_bus;
	pbm->pci_ops = &sun4u_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->portid = portid;
	pbm->parent = p;
	pbm->prom_node = dp;
	pbm->name = dp->full_name;

	regs = of_get_property(dp, "reg", NULL);
	pbm->pbm_regs = regs[0].phys_addr;
	pbm->controller_regs = regs[1].phys_addr - 0x410000UL;

	printk("%s: SUN4U PCIE Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	pci_fire_hw_init(pbm);

	err = pci_fire_pbm_iommu_init(pbm);
	if (err)
		return err;

	pci_fire_msi_init(pbm);

	return 0;
}
static inline int portid_compare(u32 x, u32 y)
{
	if (x == (y ^ 1))
		return 1;
	return 0;
}
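/* The two PBMs of one Fire controller sit on port IDs that differ only
 * in bit 0, so a new node whose portid is the XOR-with-1 sibling of an
 * already-probed PBM belongs to the same pci_controller_info; e.g.
 * portid_compare(4, 5) is true.
 */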
void fire_pci_init(struct device_node *dp, const char *model_name)
{
	struct pci_controller_info *p;
	u32 portid = of_getintprop_default(dp, "portid", 0xff);
	struct iommu *iommu;
	struct pci_pbm_info *pbm;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (portid_compare(pbm->portid, portid)) {
			if (pci_fire_pbm_init(pbm->parent, dp, portid))
				goto fatal_memory_error;
			return;
		}
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	/* XXX MSI support XXX */

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	if (pci_fire_pbm_init(p, dp, portid))
		goto fatal_memory_error;

	return;

fatal_memory_error:
	prom_printf("PCI_FIRE: Fatal memory allocation error.\n");
	prom_halt();
}