arch/powerpc/sysdev/ppc4xx_pci.c
1 /*
2 * PCI / PCI-X / PCI-Express support for 4xx parts
3 *
4 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
5 *
6 * Most of the PCI Express code comes from Stefan Roese's implementation for
7 * arch/ppc in the Denx tree, slightly reworked by me.
8 *
9 * Copyright 2007 DENX Software Engineering, Stefan Roese <sr@denx.de>
10 *
11 * Some of that code in turn comes from an earlier 440SPE-only implementation
12 * by Roland Dreier:
13 *
14 * Copyright (c) 2005 Cisco Systems. All rights reserved.
15 * Roland Dreier <rolandd@cisco.com>
16 *
17 */
18
19 #undef DEBUG
20
21 #include <linux/kernel.h>
22 #include <linux/pci.h>
23 #include <linux/init.h>
24 #include <linux/of.h>
25 #include <linux/bootmem.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28
29 #include <asm/io.h>
30 #include <asm/pci-bridge.h>
31 #include <asm/machdep.h>
32 #include <asm/dcr.h>
33 #include <asm/dcr-regs.h>
34 #include <mm/mmu_decl.h>
35
36 #include "ppc4xx_pci.h"
37
38 static int dma_offset_set;
39
40 #define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL))
41 #define U64_TO_U32_HIGH(val) ((u32)((val) >> 32))
42
43 #define RES_TO_U32_LOW(val) \
44 ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
45 #define RES_TO_U32_HIGH(val) \
46 ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
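/* Note: the sizeof() checks above keep these helpers well defined when
 * resource_size_t is only 32 bits wide, where a ">> 32" on a 32-bit value
 * would be undefined behaviour; in that case the high word is simply 0.
 */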
47
48 static inline int ppc440spe_revA(void)
49 {
50 /* Catch both 440SPe variants, with and without RAID6 support */
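/* (the 0xffefffff mask below ignores the single PVR bit in which the two
 * variants differ, so one compare value matches both)
 */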
51 if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
52 return 1;
53 else
54 return 0;
55 }
56
57 static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
58 {
59 struct pci_controller *hose;
60 int i;
61
62 if (dev->devfn != 0 || dev->bus->self != NULL)
63 return;
64
65 hose = pci_bus_to_host(dev->bus);
66 if (hose == NULL)
67 return;
68
69 if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
70 !of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
71 !of_device_is_compatible(hose->dn, "ibm,plb-pci"))
72 return;
73
74 if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
75 of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
76 hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
77 }
78
79 /* Hide the PCI host BARs from the kernel as their content doesn't
80 * fit well in the resource management
81 */
82 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
83 dev->resource[i].start = dev->resource[i].end = 0;
84 dev->resource[i].flags = 0;
85 }
86
87 printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
88 pci_name(dev));
89 }
90 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);
91
92 static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
93 void __iomem *reg,
94 struct resource *res)
95 {
96 u64 size;
97 const u32 *ranges;
98 int rlen;
99 int pna = of_n_addr_cells(hose->dn);
100 int np = pna + 5;
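/* Layout of one dma-ranges entry as consumed by the loop below
 * (np = pna + 5 cells in total, i.e. a 3-cell PCI address is assumed):
 *   cell 0           : PCI space code (memory/IO, prefetchable flag)
 *   cells 1..2       : 64-bit PCI address
 *   cells 3..2+pna   : parent (PLB/CPU) address, translated with
 *                      of_translate_dma_address()
 *   last 2 cells     : 64-bit size
 */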
101
102 /* Default */
103 res->start = 0;
104 size = 0x80000000;
105 res->end = size - 1;
106 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
107
108 /* Get dma-ranges property */
109 ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
110 if (ranges == NULL)
111 goto out;
112
113 /* Walk it */
114 while ((rlen -= np * 4) >= 0) {
115 u32 pci_space = ranges[0];
116 u64 pci_addr = of_read_number(ranges + 1, 2);
117 u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
118 size = of_read_number(ranges + pna + 3, 2);
119 ranges += np;
120 if (cpu_addr == OF_BAD_ADDR || size == 0)
121 continue;
122
123 /* We only care about memory */
124 if ((pci_space & 0x03000000) != 0x02000000)
125 continue;
126
127 /* We currently only support memory at 0, and pci_addr
128 * within 32 bits space
129 */
130 if (cpu_addr != 0 || pci_addr > 0xffffffff) {
131 printk(KERN_WARNING "%s: Ignored unsupported dma range"
132 " 0x%016llx...0x%016llx -> 0x%016llx\n",
133 hose->dn->full_name,
134 pci_addr, pci_addr + size - 1, cpu_addr);
135 continue;
136 }
137
138 /* Check if not prefetchable */
139 if (!(pci_space & 0x40000000))
140 res->flags &= ~IORESOURCE_PREFETCH;
141
142
143 /* Use that */
144 res->start = pci_addr;
145 /* Beware of 32 bits resources */
146 if (sizeof(resource_size_t) == sizeof(u32) &&
147 (pci_addr + size) > 0x100000000ull)
148 res->end = 0xffffffff;
149 else
150 res->end = res->start + size - 1;
151 break;
152 }
153
154 /* We only support one global DMA offset */
155 if (dma_offset_set && pci_dram_offset != res->start) {
156 printk(KERN_ERR "%s: dma-ranges(s) mismatch\n",
157 hose->dn->full_name);
158 return -ENXIO;
159 }
160
161 /* Check that we can fit all of memory as we don't support
162 * DMA bounce buffers
163 */
164 if (size < total_memory) {
165 printk(KERN_ERR "%s: dma-ranges too small "
166 "(size=%llx total_memory=%llx)\n",
167 hose->dn->full_name, size, (u64)total_memory);
168 return -ENXIO;
169 }
170
171 /* Check that the size is a power of 2 and that the base is size-aligned */
172 if ((size & (size - 1)) != 0 ||
173 (res->start & (size - 1)) != 0) {
174 printk(KERN_ERR "%s: dma-ranges unaligned\n",
175 hose->dn->full_name);
176 return -ENXIO;
177 }
178
179 /* Check that we are fully contained within 32 bits space */
180 if (res->end > 0xffffffff) {
181 printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n",
182 hose->dn->full_name);
183 return -ENXIO;
184 }
185 out:
186 dma_offset_set = 1;
187 pci_dram_offset = res->start;
188
189 printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
190 pci_dram_offset);
191 return 0;
192 }
193
194 /*
195 * 4xx PCI 2.x part
196 */
197
198 static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
199 void __iomem *reg,
200 u64 plb_addr,
201 u64 pci_addr,
202 u64 size,
203 unsigned int flags,
204 int index)
205 {
206 u32 ma, pcila, pciha;
207
208 /* Hack warning ! The "old" PCI 2.x cell only lets us configure the low
209 * 32 bits of incoming PLB addresses. The top 4 bits of the 36-bit
210 * address are actually hard wired to a value that appears to depend
211 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
212 *
213 * The trick here is we just crop those top bits and ignore them when
214 * programming the chip. That means the device-tree has to be right
215 * for the specific part used (we don't print a warning if it's wrong
216 * but on the other hand, you'll crash quickly enough), but at least
217 * this code should work whatever the hard coded value is
218 */
219 plb_addr &= 0xffffffffull;
220
221 /* Note: Due to the above hack, the test below doesn't actually test
222 * whether the address is above 4G, but it does check that the address and
223 * (address + size) are both contained in the same 4G
224 */
225 if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
226 size < 0x1000 || (plb_addr & (size - 1)) != 0) {
227 printk(KERN_WARNING "%s: Resource out of range\n",
228 hose->dn->full_name);
229 return -1;
230 }
231 ma = (0xffffffffu << ilog2(size)) | 1;
232 if (flags & IORESOURCE_PREFETCH)
233 ma |= 2;
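/* Worked example of the MA encoding above: for a 256MB (0x10000000)
 * window, ilog2(size) = 28, so ma = (0xffffffff << 28) | 1 = 0xf0000001,
 * i.e. the address mask in the upper bits plus the low bit set by the
 * "| 1" above (and bit 1 for prefetchable windows).
 */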
234
235 pciha = RES_TO_U32_HIGH(pci_addr);
236 pcila = RES_TO_U32_LOW(pci_addr);
237
238 writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
239 writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
240 writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
241 writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));
242
243 return 0;
244 }
245
246 static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
247 void __iomem *reg)
248 {
249 int i, j, found_isa_hole = 0;
250
251 /* Setup outbound memory windows */
252 for (i = j = 0; i < 3; i++) {
253 struct resource *res = &hose->mem_resources[i];
254
255 /* we only care about memory windows */
256 if (!(res->flags & IORESOURCE_MEM))
257 continue;
258 if (j > 2) {
259 printk(KERN_WARNING "%s: Too many ranges\n",
260 hose->dn->full_name);
261 break;
262 }
263
264 /* Configure the resource */
265 if (ppc4xx_setup_one_pci_PMM(hose, reg,
266 res->start,
267 res->start - hose->pci_mem_offset,
268 resource_size(res),
269 res->flags,
270 j) == 0) {
271 j++;
272
273 /* If the resource PCI address is 0 then we have our
274 * ISA memory hole
275 */
276 if (res->start == hose->pci_mem_offset)
277 found_isa_hole = 1;
278 }
279 }
280
281 /* Handle ISA memory hole if not already covered */
282 if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
283 if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
284 hose->isa_mem_size, 0, j) == 0)
285 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
286 hose->dn->full_name);
287 }
288
289 static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
290 void __iomem *reg,
291 const struct resource *res)
292 {
293 resource_size_t size = resource_size(res);
294 u32 sa;
295
296 /* Calculate window size */
297 sa = (0xffffffffu << ilog2(size)) | 1;
298 sa |= 0x1;
299
300 /* RAM is always at local address 0 for now */
301 writel(0, reg + PCIL0_PTM1LA);
302 writel(sa, reg + PCIL0_PTM1MS);
303
304 /* Map on PCI side */
305 early_write_config_dword(hose, hose->first_busno, 0,
306 PCI_BASE_ADDRESS_1, res->start);
307 early_write_config_dword(hose, hose->first_busno, 0,
308 PCI_BASE_ADDRESS_2, 0x00000000);
309 early_write_config_word(hose, hose->first_busno, 0,
310 PCI_COMMAND, 0x0006);
311 }
312
313 static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
314 {
315 /* NYI */
316 struct resource rsrc_cfg;
317 struct resource rsrc_reg;
318 struct resource dma_window;
319 struct pci_controller *hose = NULL;
320 void __iomem *reg = NULL;
321 const int *bus_range;
322 int primary = 0;
323
324 /* Check if device is enabled */
325 if (!of_device_is_available(np)) {
326 printk(KERN_INFO "%s: Port disabled via device-tree\n",
327 np->full_name);
328 return;
329 }
330
331 /* Fetch config space registers address */
332 if (of_address_to_resource(np, 0, &rsrc_cfg)) {
333 printk(KERN_ERR "%s: Can't get PCI config register base !",
334 np->full_name);
335 return;
336 }
337 /* Fetch host bridge internal registers address */
338 if (of_address_to_resource(np, 3, &rsrc_reg)) {
339 printk(KERN_ERR "%s: Can't get PCI internal register base !",
340 np->full_name);
341 return;
342 }
343
344 /* Check if primary bridge */
345 if (of_get_property(np, "primary", NULL))
346 primary = 1;
347
348 /* Get bus range if any */
349 bus_range = of_get_property(np, "bus-range", NULL);
350
351 /* Map registers */
352 reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
353 if (reg == NULL) {
354 printk(KERN_ERR "%s: Can't map registers !", np->full_name);
355 goto fail;
356 }
357
358 /* Allocate the host controller data structure */
359 hose = pcibios_alloc_controller(np);
360 if (!hose)
361 goto fail;
362
363 hose->first_busno = bus_range ? bus_range[0] : 0x0;
364 hose->last_busno = bus_range ? bus_range[1] : 0xff;
365
366 /* Setup config space */
367 setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);
368
369 /* Disable all windows */
370 writel(0, reg + PCIL0_PMM0MA);
371 writel(0, reg + PCIL0_PMM1MA);
372 writel(0, reg + PCIL0_PMM2MA);
373 writel(0, reg + PCIL0_PTM1MS);
374 writel(0, reg + PCIL0_PTM2MS);
375
376 /* Parse outbound mapping resources */
377 pci_process_bridge_OF_ranges(hose, np, primary);
378
379 /* Parse inbound mapping resources */
380 if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
381 goto fail;
382
383 /* Configure outbound ranges POMs */
384 ppc4xx_configure_pci_PMMs(hose, reg);
385
386 /* Configure inbound ranges PIMs */
387 ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);
388
389 /* We don't need the registers anymore */
390 iounmap(reg);
391 return;
392
393 fail:
394 if (hose)
395 pcibios_free_controller(hose);
396 if (reg)
397 iounmap(reg);
398 }
399
400 /*
401 * 4xx PCI-X part
402 */
403
404 static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
405 void __iomem *reg,
406 u64 plb_addr,
407 u64 pci_addr,
408 u64 size,
409 unsigned int flags,
410 int index)
411 {
412 u32 lah, lal, pciah, pcial, sa;
413
414 if (!is_power_of_2(size) || size < 0x1000 ||
415 (plb_addr & (size - 1)) != 0) {
416 printk(KERN_WARNING "%s: Resource out of range\n",
417 hose->dn->full_name);
418 return -1;
419 }
420
421 /* Calculate register values */
422 lah = RES_TO_U32_HIGH(plb_addr);
423 lal = RES_TO_U32_LOW(plb_addr);
424 pciah = RES_TO_U32_HIGH(pci_addr);
425 pcial = RES_TO_U32_LOW(pci_addr);
426 sa = (0xffffffffu << ilog2(size)) | 0x1;
427
428 /* Program register values */
429 if (index == 0) {
430 writel(lah, reg + PCIX0_POM0LAH);
431 writel(lal, reg + PCIX0_POM0LAL);
432 writel(pciah, reg + PCIX0_POM0PCIAH);
433 writel(pcial, reg + PCIX0_POM0PCIAL);
434 writel(sa, reg + PCIX0_POM0SA);
435 } else {
436 writel(lah, reg + PCIX0_POM1LAH);
437 writel(lal, reg + PCIX0_POM1LAL);
438 writel(pciah, reg + PCIX0_POM1PCIAH);
439 writel(pcial, reg + PCIX0_POM1PCIAL);
440 writel(sa, reg + PCIX0_POM1SA);
441 }
442
443 return 0;
444 }
445
446 static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
447 void __iomem *reg)
448 {
449 int i, j, found_isa_hole = 0;
450
451 /* Setup outbound memory windows */
452 for (i = j = 0; i < 3; i++) {
453 struct resource *res = &hose->mem_resources[i];
454
455 /* we only care about memory windows */
456 if (!(res->flags & IORESOURCE_MEM))
457 continue;
458 if (j > 1) {
459 printk(KERN_WARNING "%s: Too many ranges\n",
460 hose->dn->full_name);
461 break;
462 }
463
464 /* Configure the resource */
465 if (ppc4xx_setup_one_pcix_POM(hose, reg,
466 res->start,
467 res->start - hose->pci_mem_offset,
468 resource_size(res),
469 res->flags,
470 j) == 0) {
471 j++;
472
473 /* If the resource PCI address is 0 then we have our
474 * ISA memory hole
475 */
476 if (res->start == hose->pci_mem_offset)
477 found_isa_hole = 1;
478 }
479 }
480
481 /* Handle ISA memory hole if not already covered */
482 if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
483 if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
484 hose->isa_mem_size, 0, j) == 0)
485 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
486 hose->dn->full_name);
487 }
488
489 static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
490 void __iomem *reg,
491 const struct resource *res,
492 int big_pim,
493 int enable_msi_hole)
494 {
495 resource_size_t size = resource_size(res);
496 u32 sa;
497
498 /* RAM is always at 0 */
499 writel(0x00000000, reg + PCIX0_PIM0LAH);
500 writel(0x00000000, reg + PCIX0_PIM0LAL);
501
502 /* Calculate window size */
503 sa = (0xffffffffu << ilog2(size)) | 1;
504 sa |= 0x1;
505 if (res->flags & IORESOURCE_PREFETCH)
506 sa |= 0x2;
507 if (enable_msi_hole)
508 sa |= 0x4;
509 writel(sa, reg + PCIX0_PIM0SA);
510 if (big_pim)
511 writel(0xffffffff, reg + PCIX0_PIM0SAH);
512
513 /* Map on PCI side */
514 writel(0x00000000, reg + PCIX0_BAR0H);
515 writel(res->start, reg + PCIX0_BAR0L);
516 writew(0x0006, reg + PCIX0_COMMAND);
517 }
518
519 static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
520 {
521 struct resource rsrc_cfg;
522 struct resource rsrc_reg;
523 struct resource dma_window;
524 struct pci_controller *hose = NULL;
525 void __iomem *reg = NULL;
526 const int *bus_range;
527 int big_pim = 0, msi = 0, primary = 0;
528
529 /* Fetch config space registers address */
530 if (of_address_to_resource(np, 0, &rsrc_cfg)) {
531 printk(KERN_ERR "%s:Can't get PCI-X config register base !",
532 np->full_name);
533 return;
534 }
535 /* Fetch host bridge internal registers address */
536 if (of_address_to_resource(np, 3, &rsrc_reg)) {
537 printk(KERN_ERR "%s: Can't get PCI-X internal register base !",
538 np->full_name);
539 return;
540 }
541
542 /* Check if it supports large PIMs (440GX) */
543 if (of_get_property(np, "large-inbound-windows", NULL))
544 big_pim = 1;
545
546 /* Check if we should enable MSIs inbound hole */
547 if (of_get_property(np, "enable-msi-hole", NULL))
548 msi = 1;
549
550 /* Check if primary bridge */
551 if (of_get_property(np, "primary", NULL))
552 primary = 1;
553
554 /* Get bus range if any */
555 bus_range = of_get_property(np, "bus-range", NULL);
556
557 /* Map registers */
558 reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
559 if (reg == NULL) {
560 printk(KERN_ERR "%s: Can't map registers !", np->full_name);
561 goto fail;
562 }
563
564 /* Allocate the host controller data structure */
565 hose = pcibios_alloc_controller(np);
566 if (!hose)
567 goto fail;
568
569 hose->first_busno = bus_range ? bus_range[0] : 0x0;
570 hose->last_busno = bus_range ? bus_range[1] : 0xff;
571
572 /* Setup config space */
573 setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4,
574 PPC_INDIRECT_TYPE_SET_CFG_TYPE);
575
576 /* Disable all windows */
577 writel(0, reg + PCIX0_POM0SA);
578 writel(0, reg + PCIX0_POM1SA);
579 writel(0, reg + PCIX0_POM2SA);
580 writel(0, reg + PCIX0_PIM0SA);
581 writel(0, reg + PCIX0_PIM1SA);
582 writel(0, reg + PCIX0_PIM2SA);
583 if (big_pim) {
584 writel(0, reg + PCIX0_PIM0SAH);
585 writel(0, reg + PCIX0_PIM2SAH);
586 }
587
588 /* Parse outbound mapping resources */
589 pci_process_bridge_OF_ranges(hose, np, primary);
590
591 /* Parse inbound mapping resources */
592 if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
593 goto fail;
594
595 /* Configure outbound ranges POMs */
596 ppc4xx_configure_pcix_POMs(hose, reg);
597
598 /* Configure inbound ranges PIMs */
599 ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);
600
601 /* We don't need the registers anymore */
602 iounmap(reg);
603 return;
604
605 fail:
606 if (hose)
607 pcibios_free_controller(hose);
608 if (reg)
609 iounmap(reg);
610 }
611
612 #ifdef CONFIG_PPC4xx_PCI_EXPRESS
613
614 /*
615 * 4xx PCI-Express part
616 *
617 * We currently support 4 parts, based on the compatible property:
618 *
619 * ibm,plb-pciex-440spe
620 * ibm,plb-pciex-405ex
621 * ibm,plb-pciex-460ex and ibm,plb-pciex-460sx
622 *
623 * Anything else will be rejected for now as they are all subtly
624 * different unfortunately.
625 *
626 */
627
628 #define MAX_PCIE_BUS_MAPPED 0x40
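/* Each mapped bus takes 1MB of ioremap'd config space (see the
 * busses * 0x100000 mapping in ppc4xx_pciex_port_setup_hose()), so the
 * 0x40 limit caps that mapping at 64MB per port.
 */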
629
630 struct ppc4xx_pciex_port
631 {
632 struct pci_controller *hose;
633 struct device_node *node;
634 unsigned int index;
635 int endpoint;
636 int link;
637 int has_ibpre;
638 unsigned int sdr_base;
639 dcr_host_t dcrs;
640 struct resource cfg_space;
641 struct resource utl_regs;
642 void __iomem *utl_base;
643 };
644
645 static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
646 static unsigned int ppc4xx_pciex_port_count;
647
648 struct ppc4xx_pciex_hwops
649 {
650 int (*core_init)(struct device_node *np);
651 int (*port_init_hw)(struct ppc4xx_pciex_port *port);
652 int (*setup_utl)(struct ppc4xx_pciex_port *port);
653 void (*check_link)(struct ppc4xx_pciex_port *port);
654 };
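/* Per-SoC family hook table: core_init() runs once and returns the number
 * of PCIe ports to allocate, while the remaining hooks are called for each
 * port from ppc4xx_pciex_port_init().
 */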
655
656 static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
657
658 static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
659 unsigned int sdr_offset,
660 unsigned int mask,
661 unsigned int value,
662 int timeout_ms)
663 {
664 u32 val;
665
666 while (timeout_ms--) {
667 val = mfdcri(SDR0, port->sdr_base + sdr_offset);
668 if ((val & mask) == value) {
669 pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
670 port->index, sdr_offset, timeout_ms, val);
671 return 0;
672 }
673 msleep(1);
674 }
675 return -1;
676 }
677
678 static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
679 {
680 /* Wait for reset to complete */
681 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
682 printk(KERN_WARNING "PCIE%d: PGRST failed\n",
683 port->index);
684 return -1;
685 }
686 return 0;
687 }
688
689
690 static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
691 {
692 printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
693
694 /* Check for card presence detect if supported, if not, just wait for
695 * link unconditionally.
696 *
697 * note that we don't fail if there is no link, we just filter out
698 * config space accesses. That way, it will be easier to implement
699 * hotplug later on.
700 */
701 if (!port->has_ibpre ||
702 !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
703 1 << 28, 1 << 28, 100)) {
704 printk(KERN_INFO
705 "PCIE%d: Device detected, waiting for link...\n",
706 port->index);
707 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
708 0x1000, 0x1000, 2000))
709 printk(KERN_WARNING
710 "PCIE%d: Link up failed\n", port->index);
711 else {
712 printk(KERN_INFO
713 "PCIE%d: link is up !\n", port->index);
714 port->link = 1;
715 }
716 } else
717 printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
718 }
719
720 #ifdef CONFIG_44x
721
722 /* Check various reset bits of the 440SPe PCIe core */
723 static int __init ppc440spe_pciex_check_reset(struct device_node *np)
724 {
725 u32 valPE0, valPE1, valPE2;
726 int err = 0;
727
728 /* SDR0_PEGPLLLCT1 reset */
729 if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
730 /*
731 * the PCIe core was probably already initialised
732 * by firmware - let's re-reset RCSSET regs
733 *
734 * -- Shouldn't we also re-reset the whole thing ? -- BenH
735 */
736 pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
737 mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
738 mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
739 mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
740 }
741
742 valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
743 valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
744 valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);
745
746 /* SDR0_PExRCSSET rstgu */
747 if (!(valPE0 & 0x01000000) ||
748 !(valPE1 & 0x01000000) ||
749 !(valPE2 & 0x01000000)) {
750 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
751 err = -1;
752 }
753
754 /* SDR0_PExRCSSET rstdl */
755 if (!(valPE0 & 0x00010000) ||
756 !(valPE1 & 0x00010000) ||
757 !(valPE2 & 0x00010000)) {
758 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
759 err = -1;
760 }
761
762 /* SDR0_PExRCSSET rstpyn */
763 if ((valPE0 & 0x00001000) ||
764 (valPE1 & 0x00001000) ||
765 (valPE2 & 0x00001000)) {
766 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
767 err = -1;
768 }
769
770 /* SDR0_PExRCSSET hldplb */
771 if ((valPE0 & 0x10000000) ||
772 (valPE1 & 0x10000000) ||
773 (valPE2 & 0x10000000)) {
774 printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
775 err = -1;
776 }
777
778 /* SDR0_PExRCSSET rdy */
779 if ((valPE0 & 0x00100000) ||
780 (valPE1 & 0x00100000) ||
781 (valPE2 & 0x00100000)) {
782 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
783 err = -1;
784 }
785
786 /* SDR0_PExRCSSET shutdown */
787 if ((valPE0 & 0x00000100) ||
788 (valPE1 & 0x00000100) ||
789 (valPE2 & 0x00000100)) {
790 printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
791 err = -1;
792 }
793
794 return err;
795 }
796
797 /* Global PCIe core initializations for 440SPe core */
798 static int __init ppc440spe_pciex_core_init(struct device_node *np)
799 {
800 int time_out = 20;
801
802 /* Set PLL clock receiver to LVPECL */
803 dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);
804
805 /* Shouldn't we do all the calibration stuff etc... here ? */
806 if (ppc440spe_pciex_check_reset(np))
807 return -ENXIO;
808
809 if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
810 printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
811 "failed (0x%08x)\n",
812 mfdcri(SDR0, PESDR0_PLLLCT2));
813 return -1;
814 }
815
816 /* De-assert reset of PCIe PLL, wait for lock */
817 dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
818 udelay(3);
819
820 while (time_out) {
821 if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
822 time_out--;
823 udelay(1);
824 } else
825 break;
826 }
827 if (!time_out) {
828 printk(KERN_INFO "PCIE: VCO output not locked\n");
829 return -1;
830 }
831
832 pr_debug("PCIE initialization OK\n");
833
834 return 3;
835 }
836
837 static int ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
838 {
839 u32 val = 1 << 24;
840
841 if (port->endpoint)
842 val = PTYPE_LEGACY_ENDPOINT << 20;
843 else
844 val = PTYPE_ROOT_PORT << 20;
845
846 if (port->index == 0)
847 val |= LNKW_X8 << 12;
848 else
849 val |= LNKW_X4 << 12;
850
851 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
852 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
853 if (ppc440spe_revA())
854 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
855 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
856 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
857 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
858 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
859 if (port->index == 0) {
860 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
861 0x35000000);
862 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
863 0x35000000);
864 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
865 0x35000000);
866 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
867 0x35000000);
868 }
869 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
870 (1 << 24) | (1 << 16), 1 << 12);
871
872 return ppc4xx_pciex_port_reset_sdr(port);
873 }
874
875 static int ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
876 {
877 return ppc440spe_pciex_init_port_hw(port);
878 }
879
880 static int ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
881 {
882 int rc = ppc440spe_pciex_init_port_hw(port);
883
884 port->has_ibpre = 1;
885
886 return rc;
887 }
888
889 static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
890 {
891 /* XXX Check what that value means... I hate magic */
892 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);
893
894 /*
895 * Set buffer allocations and then assert VRB and TXE.
896 */
897 out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
898 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
899 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x10000000);
900 out_be32(port->utl_base + PEUTL_PBBSZ, 0x53000000);
901 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x08000000);
902 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x10000000);
903 out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
904 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
905
906 return 0;
907 }
908
909 static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
910 {
911 /* Report CRS to the operating system */
912 out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
913
914 return 0;
915 }
916
917 static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
918 {
919 .core_init = ppc440spe_pciex_core_init,
920 .port_init_hw = ppc440speA_pciex_init_port_hw,
921 .setup_utl = ppc440speA_pciex_init_utl,
922 .check_link = ppc4xx_pciex_check_link_sdr,
923 };
924
925 static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
926 {
927 .core_init = ppc440spe_pciex_core_init,
928 .port_init_hw = ppc440speB_pciex_init_port_hw,
929 .setup_utl = ppc440speB_pciex_init_utl,
930 .check_link = ppc4xx_pciex_check_link_sdr,
931 };
932
933 static int __init ppc460ex_pciex_core_init(struct device_node *np)
934 {
935 /* Nothing to do, return 2 ports */
936 return 2;
937 }
938
939 static int ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
940 {
941 u32 val;
942 u32 utlset1;
943
944 if (port->endpoint)
945 val = PTYPE_LEGACY_ENDPOINT << 20;
946 else
947 val = PTYPE_ROOT_PORT << 20;
948
949 if (port->index == 0) {
950 val |= LNKW_X1 << 12;
951 utlset1 = 0x20000000;
952 } else {
953 val |= LNKW_X4 << 12;
954 utlset1 = 0x20101101;
955 }
956
957 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
958 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
959 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);
960
961 switch (port->index) {
962 case 0:
963 mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
964 mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
965 mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
966
967 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000);
968 break;
969
970 case 1:
971 mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
972 mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
973 mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
974 mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
975 mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
976 mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
977 mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
978 mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
979 mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
980 mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
981 mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
982 mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);
983
984 mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000);
985 break;
986 }
987
988 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
989 mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
990 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
991
992 /* Poll for PHY reset */
993 /* XXX FIXME add timeout */
994 switch (port->index) {
995 case 0:
996 while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
997 udelay(10);
998 break;
999 case 1:
1000 while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
1001 udelay(10);
1002 break;
1003 }
1004
1005 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1006 (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
1007 ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
1008 PESDRx_RCSSET_RSTPYN);
1009
1010 port->has_ibpre = 1;
1011
1012 return ppc4xx_pciex_port_reset_sdr(port);
1013 }
1014
1015 static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1016 {
1017 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1018
1019 /*
1020 * Set buffer allocations and then assert VRB and TXE.
1021 */
1022 out_be32(port->utl_base + PEUTL_PBCTL, 0x0800000c);
1023 out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
1024 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
1025 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
1026 out_be32(port->utl_base + PEUTL_PBBSZ, 0x00000000);
1027 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
1028 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
1029 out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000);
1030 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
1031
1032 return 0;
1033 }
1034
1035 static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
1036 {
1037 .core_init = ppc460ex_pciex_core_init,
1038 .port_init_hw = ppc460ex_pciex_init_port_hw,
1039 .setup_utl = ppc460ex_pciex_init_utl,
1040 .check_link = ppc4xx_pciex_check_link_sdr,
1041 };
1042
1043 static int __init ppc460sx_pciex_core_init(struct device_node *np)
1044 {
1045 /* HSS drive amplitude */
1046 mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
1047 mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
1048 mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
1049 mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
1050 mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
1051 mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
1052 mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
1053 mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);
1054
1055 mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
1056 mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
1057 mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
1058 mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);
1059
1060 mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
1061 mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
1062 mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
1063 mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);
1064
1065 /* HSS TX pre-emphasis */
1066 mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
1067 mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
1068 mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
1069 mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
1070 mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
1071 mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
1072 mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
1073 mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);
1074
1075 mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
1076 mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
1077 mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
1078 mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);
1079
1080 mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
1081 mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
1082 mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
1083 mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);
1084
1085 /* HSS TX calibration control */
1086 mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
1087 mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
1088 mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);
1089
1090 /* HSS TX slew control */
1091 mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
1092 mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
1093 mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);
1094
1095 /* Set HSS PRBS enabled */
1096 mtdcri(SDR0, PESDR0_460SX_HSSCTLSET, 0x00001130);
1097 mtdcri(SDR0, PESDR2_460SX_HSSCTLSET, 0x00001130);
1098
1099 udelay(100);
1100
1101 /* De-assert PLLRESET */
1102 dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);
1103
1104 /* Reset DL, UTL, GPL before configuration */
1105 mtdcri(SDR0, PESDR0_460SX_RCSSET,
1106 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1107 mtdcri(SDR0, PESDR1_460SX_RCSSET,
1108 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1109 mtdcri(SDR0, PESDR2_460SX_RCSSET,
1110 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1111
1112 udelay(100);
1113
1114 /*
1115 * If bifurcation is not enabled, u-boot would have disabled the
1116 * third PCIe port
1117 */
1118 if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
1119 0x00000001)) {
1120 printk(KERN_INFO "PCI: PCIE bifurcation setup successfully.\n");
1121 printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
1122 return 3;
1123 }
1124
1125 printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
1126 return 2;
1127 }
1128
1129 static int ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1130 {
1131
1132 if (port->endpoint)
1133 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1134 0x01000000, 0);
1135 else
1136 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1137 0, 0x01000000);
1138
1139 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
1140 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
1141 PESDRx_RCSSET_RSTPYN);
1142
1143 port->has_ibpre = 1;
1144
1145 return ppc4xx_pciex_port_reset_sdr(port);
1146 }
1147
1148 static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
1149 {
1150 /* Max 128 Bytes */
1151 out_be32 (port->utl_base + PEUTL_PBBSZ, 0x00000000);
1152 /* Assert VRB and TXE - per datasheet turn off addr validation */
1153 out_be32(port->utl_base + PEUTL_PCTL, 0x80800000);
1154 return 0;
1155 }
1156
1157 static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
1158 {
1159 void __iomem *mbase;
1160 int attempt = 50;
1161
1162 port->link = 0;
1163
1164 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1165 if (mbase == NULL) {
1166 printk(KERN_ERR "%s: Can't map internal config space !",
1167 port->node->full_name);
1168 goto done;
1169 }
1170
1171 while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
1172 & PECFG_460SX_DLLSTA_LINKUP))) {
1173 attempt--;
1174 mdelay(10);
1175 }
1176 if (attempt)
1177 port->link = 1;
1178 done:
1179 iounmap(mbase);
1180
1181 }
1182
1183 static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
1184 .core_init = ppc460sx_pciex_core_init,
1185 .port_init_hw = ppc460sx_pciex_init_port_hw,
1186 .setup_utl = ppc460sx_pciex_init_utl,
1187 .check_link = ppc460sx_pciex_check_link,
1188 };
1189
1190 #endif /* CONFIG_44x */
1191
1192 #ifdef CONFIG_40x
1193
1194 static int __init ppc405ex_pciex_core_init(struct device_node *np)
1195 {
1196 /* Nothing to do, return 2 ports */
1197 return 2;
1198 }
1199
1200 static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
1201 {
1202 /* Assert the PE0_PHY reset */
1203 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
1204 msleep(1);
1205
1206 /* deassert the PE0_hotreset */
1207 if (port->endpoint)
1208 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
1209 else
1210 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
1211
1212 /* poll for phy !reset */
1213 /* XXX FIXME add timeout */
1214 while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
1215 ;
1216
1217 /* deassert the PE0_gpl_utl_reset */
1218 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
1219 }
1220
1221 static int ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1222 {
1223 u32 val;
1224
1225 if (port->endpoint)
1226 val = PTYPE_LEGACY_ENDPOINT;
1227 else
1228 val = PTYPE_ROOT_PORT;
1229
1230 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
1231 1 << 24 | val << 20 | LNKW_X1 << 12);
1232
1233 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
1234 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
1235 mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
1236 mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);
1237
1238 /*
1239 * Only reset the PHY when no link is currently established.
1240 * This is for the Atheros PCIe board, which has problems re-establishing
1241 * the link after this PHY reset. All other currently tested
1242 * PCIe boards don't show this problem.
1243 * This has to be re-tested and fixed in a later release!
1244 */
1245 val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
1246 if (!(val & 0x00001000))
1247 ppc405ex_pcie_phy_reset(port);
1248
1249 dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000); /* guarded on */
1250
1251 port->has_ibpre = 1;
1252
1253 return ppc4xx_pciex_port_reset_sdr(port);
1254 }
1255
1256 static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1257 {
1258 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1259
1260 /*
1261 * Set buffer allocations and then assert VRB and TXE.
1262 */
1263 out_be32(port->utl_base + PEUTL_OUTTR, 0x02000000);
1264 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
1265 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
1266 out_be32(port->utl_base + PEUTL_PBBSZ, 0x21000000);
1267 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
1268 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
1269 out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
1270 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
1271
1272 out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
1273
1274 return 0;
1275 }
1276
1277 static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
1278 {
1279 .core_init = ppc405ex_pciex_core_init,
1280 .port_init_hw = ppc405ex_pciex_init_port_hw,
1281 .setup_utl = ppc405ex_pciex_init_utl,
1282 .check_link = ppc4xx_pciex_check_link_sdr,
1283 };
1284
1285 #endif /* CONFIG_40x */
1286
1287 /* Check that the core has been initialized and if not, do it */
1288 static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
1289 {
1290 static int core_init;
1291 int count = -ENODEV;
1292
1293 if (core_init++)
1294 return 0;
1295
1296 #ifdef CONFIG_44x
1297 if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
1298 if (ppc440spe_revA())
1299 ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
1300 else
1301 ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
1302 }
1303 if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
1304 ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
1305 if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
1306 ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
1307 #endif /* CONFIG_44x */
1308 #ifdef CONFIG_40x
1309 if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
1310 ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
1311 #endif
1312 if (ppc4xx_pciex_hwops == NULL) {
1313 printk(KERN_WARNING "PCIE: unknown host type %s\n",
1314 np->full_name);
1315 return -ENODEV;
1316 }
1317
1318 count = ppc4xx_pciex_hwops->core_init(np);
1319 if (count > 0) {
1320 ppc4xx_pciex_ports =
1321 kzalloc(count * sizeof(struct ppc4xx_pciex_port),
1322 GFP_KERNEL);
1323 if (ppc4xx_pciex_ports) {
1324 ppc4xx_pciex_port_count = count;
1325 return 0;
1326 }
1327 printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
1328 return -ENOMEM;
1329 }
1330 return -ENODEV;
1331 }
1332
1333 static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
1334 {
1335 /* We map PCI Express configuration based on the reg property */
1336 dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
1337 RES_TO_U32_HIGH(port->cfg_space.start));
1338 dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
1339 RES_TO_U32_LOW(port->cfg_space.start));
1340
1341 /* XXX FIXME: Use size from reg property. For now, map 512M */
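/* 0xe0000001 below: mask selecting the top 3 address bits (a 512MB
 * region, matching the comment above) with the low valid bit set.
 */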
1342 dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);
1343
1344 /* We map UTL registers based on the reg property */
1345 dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
1346 RES_TO_U32_HIGH(port->utl_regs.start));
1347 dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
1348 RES_TO_U32_LOW(port->utl_regs.start));
1349
1350 /* XXX FIXME: Use size from reg property */
1351 dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);
1352
1353 /* Disable all other outbound windows */
1354 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
1355 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
1356 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
1357 dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
1358 }
1359
1360 static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
1361 {
1362 int rc = 0;
1363
1364 /* Init HW */
1365 if (ppc4xx_pciex_hwops->port_init_hw)
1366 rc = ppc4xx_pciex_hwops->port_init_hw(port);
1367 if (rc != 0)
1368 return rc;
1369
1370 /*
1371 * Initialize mapping: disable all regions and configure
1372 * CFG and REG regions based on resources in the device tree
1373 */
1374 ppc4xx_pciex_port_init_mapping(port);
1375
1376 if (ppc4xx_pciex_hwops->check_link)
1377 ppc4xx_pciex_hwops->check_link(port);
1378
1379 /*
1380 * Map UTL
1381 */
1382 port->utl_base = ioremap(port->utl_regs.start, 0x100);
1383 BUG_ON(port->utl_base == NULL);
1384
1385 /*
1386 * Setup UTL registers --BenH.
1387 */
1388 if (ppc4xx_pciex_hwops->setup_utl)
1389 ppc4xx_pciex_hwops->setup_utl(port);
1390
1391 /*
1392 * Check for VC0 active or PLL Locked and assert RDY.
1393 */
1394 if (port->sdr_base) {
1395 if (of_device_is_compatible(port->node,
1396 "ibm,plb-pciex-460sx")){
1397 if (port->link && ppc4xx_pciex_wait_on_sdr(port,
1398 PESDRn_RCSSTS,
1399 1 << 12, 1 << 12, 5000)) {
1400 printk(KERN_INFO "PCIE%d: PLL not locked\n",
1401 port->index);
1402 port->link = 0;
1403 }
1404 } else if (port->link &&
1405 ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
1406 1 << 16, 1 << 16, 5000)) {
1407 printk(KERN_INFO "PCIE%d: VC0 not active\n",
1408 port->index);
1409 port->link = 0;
1410 }
1411
1412 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
1413 }
1414
1415 msleep(100);
1416
1417 return 0;
1418 }
1419
1420 static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
1421 struct pci_bus *bus,
1422 unsigned int devfn)
1423 {
1424 static int message;
1425
1426 /* An endpoint cannot generate upstream (remote) config cycles */
1427 if (port->endpoint && bus->number != port->hose->first_busno)
1428 return PCIBIOS_DEVICE_NOT_FOUND;
1429
1430 /* Check we are within the mapped range */
1431 if (bus->number > port->hose->last_busno) {
1432 if (!message) {
1433 printk(KERN_WARNING "Warning! Probing bus %u"
1434 " out of range !\n", bus->number);
1435 message++;
1436 }
1437 return PCIBIOS_DEVICE_NOT_FOUND;
1438 }
1439
1440 /* The root complex has only one device / function */
1441 if (bus->number == port->hose->first_busno && devfn != 0)
1442 return PCIBIOS_DEVICE_NOT_FOUND;
1443
1444 /* The other side of the RC has only one device as well */
1445 if (bus->number == (port->hose->first_busno + 1) &&
1446 PCI_SLOT(devfn) != 0)
1447 return PCIBIOS_DEVICE_NOT_FOUND;
1448
1449 /* Check if we have a link */
1450 if ((bus->number != port->hose->first_busno) && !port->link)
1451 return PCIBIOS_DEVICE_NOT_FOUND;
1452
1453 return 0;
1454 }
1455
1456 static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
1457 struct pci_bus *bus,
1458 unsigned int devfn)
1459 {
1460 int relbus;
1461
1462 /* Remove the casts when we finally remove the stupid volatile
1463 * in struct pci_controller
1464 */
1465 if (bus->number == port->hose->first_busno)
1466 return (void __iomem *)port->hose->cfg_addr;
1467
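/* External config space is laid out as 1MB per bus (relbus << 20) with
 * 4KB per device/function (devfn << 12), matching the busses * 0x100000
 * ioremap done in ppc4xx_pciex_port_setup_hose().
 */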
1468 relbus = bus->number - (port->hose->first_busno + 1);
1469 return (void __iomem *)port->hose->cfg_data +
1470 ((relbus << 20) | (devfn << 12));
1471 }
1472
1473 static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
1474 int offset, int len, u32 *val)
1475 {
1476 struct pci_controller *hose = pci_bus_to_host(bus);
1477 struct ppc4xx_pciex_port *port =
1478 &ppc4xx_pciex_ports[hose->indirect_type];
1479 void __iomem *addr;
1480 u32 gpl_cfg;
1481
1482 BUG_ON(hose != port->hose);
1483
1484 if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1485 return PCIBIOS_DEVICE_NOT_FOUND;
1486
1487 addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1488
1489 /*
1490 * Reading from configuration space of non-existing device can
1491 * generate transaction errors. For the read duration we suppress
1492 * assertion of machine check exceptions to avoid those.
1493 */
1494 gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1495 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1496
1497 /* Make sure no CRS is recorded */
1498 out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);
1499
1500 switch (len) {
1501 case 1:
1502 *val = in_8((u8 *)(addr + offset));
1503 break;
1504 case 2:
1505 *val = in_le16((u16 *)(addr + offset));
1506 break;
1507 default:
1508 *val = in_le32((u32 *)(addr + offset));
1509 break;
1510 }
1511
1512 pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
1513 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1514 bus->number, hose->first_busno, hose->last_busno,
1515 devfn, offset, len, addr + offset, *val);
1516
1517 /* Check for CRS (440SPe rev B does that for us but heh ..) */
1518 if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
1519 pr_debug("Got CRS !\n");
1520 if (len != 4 || offset != 0)
1521 return PCIBIOS_DEVICE_NOT_FOUND;
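/* 0xffff0001 is the value defined by the PCIe CRS software-visibility
 * mechanism for a Vendor ID read that got retried: Vendor ID 0x0001
 * with all-ones in the upper half.
 */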
1522 *val = 0xffff0001;
1523 }
1524
1525 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1526
1527 return PCIBIOS_SUCCESSFUL;
1528 }
1529
1530 static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
1531 int offset, int len, u32 val)
1532 {
1533 struct pci_controller *hose = pci_bus_to_host(bus);
1534 struct ppc4xx_pciex_port *port =
1535 &ppc4xx_pciex_ports[hose->indirect_type];
1536 void __iomem *addr;
1537 u32 gpl_cfg;
1538
1539 if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1540 return PCIBIOS_DEVICE_NOT_FOUND;
1541
1542 addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1543
1544 /*
1545 * Writing to the configuration space of a non-existent device can
1546 * generate transaction errors. For the duration of the access we suppress
1547 * assertion of machine check exceptions to avoid those.
1548 */
1549 gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1550 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1551
1552 pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
1553 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1554 bus->number, hose->first_busno, hose->last_busno,
1555 devfn, offset, len, addr + offset, val);
1556
1557 switch (len) {
1558 case 1:
1559 out_8((u8 *)(addr + offset), val);
1560 break;
1561 case 2:
1562 out_le16((u16 *)(addr + offset), val);
1563 break;
1564 default:
1565 out_le32((u32 *)(addr + offset), val);
1566 break;
1567 }
1568
1569 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1570
1571 return PCIBIOS_SUCCESSFUL;
1572 }
1573
1574 static struct pci_ops ppc4xx_pciex_pci_ops =
1575 {
1576 .read = ppc4xx_pciex_read_config,
1577 .write = ppc4xx_pciex_write_config,
1578 };
1579
1580 static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
1581 struct pci_controller *hose,
1582 void __iomem *mbase,
1583 u64 plb_addr,
1584 u64 pci_addr,
1585 u64 size,
1586 unsigned int flags,
1587 int index)
1588 {
1589 u32 lah, lal, pciah, pcial, sa;
1590
1591 if (!is_power_of_2(size) ||
1592 (index < 2 && size < 0x100000) ||
1593 (index == 2 && size < 0x100) ||
1594 (plb_addr & (size - 1)) != 0) {
1595 printk(KERN_WARNING "%s: Resource out of range\n",
1596 hose->dn->full_name);
1597 return -1;
1598 }
1599
1600 /* Calculate register values */
1601 lah = RES_TO_U32_HIGH(plb_addr);
1602 lal = RES_TO_U32_LOW(plb_addr);
1603 pciah = RES_TO_U32_HIGH(pci_addr);
1604 pcial = RES_TO_U32_LOW(pci_addr);
1605 sa = (0xffffffffu << ilog2(size)) | 0x1;
1606
1607 /* Program register values */
1608 switch (index) {
1609 case 0:
1610 out_le32(mbase + PECFG_POM0LAH, pciah);
1611 out_le32(mbase + PECFG_POM0LAL, pcial);
1612 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
1613 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
1614 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
1615 /* Enabled and single region */
1616 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
1617 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1618 sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
1619 | DCRO_PEGPL_OMRxMSKL_VAL);
1620 else
1621 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1622 sa | DCRO_PEGPL_OMR1MSKL_UOT
1623 | DCRO_PEGPL_OMRxMSKL_VAL);
1624 break;
1625 case 1:
1626 out_le32(mbase + PECFG_POM1LAH, pciah);
1627 out_le32(mbase + PECFG_POM1LAL, pcial);
1628 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
1629 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
1630 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
1631 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL,
1632 sa | DCRO_PEGPL_OMRxMSKL_VAL);
1633 break;
1634 case 2:
1635 out_le32(mbase + PECFG_POM2LAH, pciah);
1636 out_le32(mbase + PECFG_POM2LAL, pcial);
1637 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
1638 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
1639 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
1640 /* Note that 3 here means enabled | IO space !!! */
1641 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL,
1642 sa | DCRO_PEGPL_OMR3MSKL_IO
1643 | DCRO_PEGPL_OMRxMSKL_VAL);
1644 break;
1645 }
1646
1647 return 0;
1648 }
1649
1650 static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
1651 struct pci_controller *hose,
1652 void __iomem *mbase)
1653 {
1654 int i, j, found_isa_hole = 0;
1655
1656 /* Setup outbound memory windows */
1657 for (i = j = 0; i < 3; i++) {
1658 struct resource *res = &hose->mem_resources[i];
1659
1660 /* we only care about memory windows */
1661 if (!(res->flags & IORESOURCE_MEM))
1662 continue;
1663 if (j > 1) {
1664 printk(KERN_WARNING "%s: Too many ranges\n",
1665 port->node->full_name);
1666 break;
1667 }
1668
1669 /* Configure the resource */
1670 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1671 res->start,
1672 res->start - hose->pci_mem_offset,
1673 resource_size(res),
1674 res->flags,
1675 j) == 0) {
1676 j++;
1677
1678 /* If the resource PCI address is 0 then we have our
1679 * ISA memory hole
1680 */
1681 if (res->start == hose->pci_mem_offset)
1682 found_isa_hole = 1;
1683 }
1684 }
1685
1686 /* Handle ISA memory hole if not already covered */
1687 if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
1688 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1689 hose->isa_mem_phys, 0,
1690 hose->isa_mem_size, 0, j) == 0)
1691 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
1692 hose->dn->full_name);
1693
1694 /* Configure IO, always 64K starting at 0. We hard wire it to 64K !
1695 * Note also that it -has- to be region index 2 on this HW
1696 */
1697 if (hose->io_resource.flags & IORESOURCE_IO)
1698 ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1699 hose->io_base_phys, 0,
1700 0x10000, IORESOURCE_IO, 2);
1701 }
1702
1703 static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
1704 struct pci_controller *hose,
1705 void __iomem *mbase,
1706 struct resource *res)
1707 {
1708 resource_size_t size = resource_size(res);
1709 u64 sa;
1710
1711 if (port->endpoint) {
1712 resource_size_t ep_addr = 0;
1713 resource_size_t ep_size = 32 << 20;
1714
1715 /* Currently we map a fixed 32MByte (ep_size) window to PLB address
1716 * 0 (SDRAM). This should probably be configurable via a dts
1717 * property.
1718 */
1719
1720 /* Calculate window size */
1721 sa = (0xffffffffffffffffull << ilog2(ep_size));
1722
1723 /* Setup BAR0 */
1724 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1725 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
1726 PCI_BASE_ADDRESS_MEM_TYPE_64);
1727
1728 /* Disable BAR1 & BAR2 */
1729 out_le32(mbase + PECFG_BAR1MPA, 0);
1730 out_le32(mbase + PECFG_BAR2HMPA, 0);
1731 out_le32(mbase + PECFG_BAR2LMPA, 0);
1732
1733 out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
1734 out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
1735
1736 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
1737 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
1738 } else {
1739 /* Calculate window size */
1740 sa = (0xffffffffffffffffull << ilog2(size));
1741 if (res->flags & IORESOURCE_PREFETCH)
1742 sa |= 0x8;
1743
1744 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
1745 sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;
1746
1747 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1748 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
1749
1750 /* The setup of the split looks weird to me ... let's see
1751 * if it works
1752 */
1753 out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
1754 out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
1755 out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
1756 out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
1757 out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
1758 out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
1759
1760 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
1761 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
1762 }
1763
1764 /* Enable inbound mapping */
1765 out_le32(mbase + PECFG_PIMEN, 0x1);
1766
1767 /* Enable I/O, Mem, and Busmaster cycles */
1768 out_le16(mbase + PCI_COMMAND,
1769 in_le16(mbase + PCI_COMMAND) |
1770 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1771 }
1772
1773 static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
1774 {
1775 struct resource dma_window;
1776 struct pci_controller *hose = NULL;
1777 const int *bus_range;
1778 int primary = 0, busses;
1779 void __iomem *mbase = NULL, *cfg_data = NULL;
1780 const u32 *pval;
1781 u32 val;
1782
1783 /* Check if primary bridge */
1784 if (of_get_property(port->node, "primary", NULL))
1785 primary = 1;
1786
1787 /* Get bus range if any */
1788 bus_range = of_get_property(port->node, "bus-range", NULL);
1789
1790 /* Allocate the host controller data structure */
1791 hose = pcibios_alloc_controller(port->node);
1792 if (!hose)
1793 goto fail;
1794
1795 /* We stick the port number in "indirect_type" so the config space
1796 * ops can retrieve the port data structure easily
1797 */
1798 hose->indirect_type = port->index;
1799
1800 /* Get bus range */
1801 hose->first_busno = bus_range ? bus_range[0] : 0x0;
1802 hose->last_busno = bus_range ? bus_range[1] : 0xff;
1803
1804 /* Because the config space mapping is so big (1M per bus), we
1805 * limit how many busses we support. In the long run, we could replace
1806 * that with something akin to kmap_atomic instead. We set aside 1 bus
1807 * for the host itself too.
1808 */
1809 busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
1810 if (busses > MAX_PCIE_BUS_MAPPED) {
1811 busses = MAX_PCIE_BUS_MAPPED;
1812 hose->last_busno = hose->first_busno + busses;
1813 }
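/* e.g. with the default 0x00..0xff bus-range, busses = 0xff is clamped to
 * 0x40 and last_busno becomes first_busno + 0x40, keeping the external
 * config ioremap below at 64MB.
 */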
1814
1815 if (!port->endpoint) {
1816 /* Only map the external config space in cfg_data for
1817 * PCIe root-complexes. External space is 1M per bus
1818 */
1819 cfg_data = ioremap(port->cfg_space.start +
1820 (hose->first_busno + 1) * 0x100000,
1821 busses * 0x100000);
1822 if (cfg_data == NULL) {
1823 printk(KERN_ERR "%s: Can't map external config space !",
1824 port->node->full_name);
1825 goto fail;
1826 }
1827 hose->cfg_data = cfg_data;
1828 }
1829
1830 /* Always map the host config space in cfg_addr.
1831 * Internal space is 4K
1832 */
1833 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1834 if (mbase == NULL) {
1835 printk(KERN_ERR "%s: Can't map internal config space !",
1836 port->node->full_name);
1837 goto fail;
1838 }
1839 hose->cfg_addr = mbase;
1840
1841 pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
1842 hose->first_busno, hose->last_busno);
1843 pr_debug(" config space mapped at: root @0x%p, other @0x%p\n",
1844 hose->cfg_addr, hose->cfg_data);
1845
1846 /* Setup config space */
1847 hose->ops = &ppc4xx_pciex_pci_ops;
1848 port->hose = hose;
1849 mbase = (void __iomem *)hose->cfg_addr;
1850
1851 if (!port->endpoint) {
1852 /*
1853 * Set bus numbers on our root port
1854 */
1855 out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
1856 out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
1857 out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
1858 }
1859
1860 /*
1861 * OMRs are already reset, also disable PIMs
1862 */
1863 out_le32(mbase + PECFG_PIMEN, 0);
1864
1865 /* Parse outbound mapping resources */
1866 pci_process_bridge_OF_ranges(hose, port->node, primary);
1867
1868 /* Parse inbound mapping resources */
1869 if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
1870 goto fail;
1871
1872 /* Configure outbound ranges POMs */
1873 ppc4xx_configure_pciex_POMs(port, hose, mbase);
1874
1875 /* Configure inbound ranges PIMs */
1876 ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
1877
1878 /* The root complex doesn't show up if we don't set some vendor
1879 * and device IDs into it. The defaults below are the same bogus
1880 * ones that the initial code in arch/ppc had. This can be
1881 * overwritten by setting the "vendor-id/device-id" properties
1882 * in the pciex node.
1883 */
1884
1885 /* Get the (optional) vendor-/device-id from the device-tree */
1886 pval = of_get_property(port->node, "vendor-id", NULL);
1887 if (pval) {
1888 val = *pval;
1889 } else {
1890 if (!port->endpoint)
1891 val = 0xaaa0 + port->index;
1892 else
1893 val = 0xeee0 + port->index;
1894 }
1895 out_le16(mbase + 0x200, val);
1896
1897 pval = of_get_property(port->node, "device-id", NULL);
1898 if (pval) {
1899 val = *pval;
1900 } else {
1901 if (!port->endpoint)
1902 val = 0xbed0 + port->index;
1903 else
1904 val = 0xfed0 + port->index;
1905 }
1906 out_le16(mbase + 0x202, val);
1907
1908 /* Enable Bus master, memory, and io space */
1909 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
1910 out_le16(mbase + 0x204, 0x7);
1911
1912 if (!port->endpoint) {
1913 /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
1914 out_le32(mbase + 0x208, 0x06040001);
1915
1916 printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
1917 port->index);
1918 } else {
1919 /* Set Class Code to Processor/PPC */
1920 out_le32(mbase + 0x208, 0x0b200001);
1921
1922 printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
1923 port->index);
1924 }
1925
1926 return;
1927 fail:
1928 if (hose)
1929 pcibios_free_controller(hose);
1930 if (cfg_data)
1931 iounmap(cfg_data);
1932 if (mbase)
1933 iounmap(mbase);
1934 }
1935
1936 static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
1937 {
1938 struct ppc4xx_pciex_port *port;
1939 const u32 *pval;
1940 int portno;
1941 unsigned int dcrs;
1942 const char *val;
1943
1944 /* First, proceed to core initialization as we assume there's
1945 * only one PCIe core in the system
1946 */
1947 if (ppc4xx_pciex_check_core_init(np))
1948 return;
1949
1950 /* Get the port number from the device-tree */
1951 pval = of_get_property(np, "port", NULL);
1952 if (pval == NULL) {
1953 printk(KERN_ERR "PCIE: Can't find port number for %s\n",
1954 np->full_name);
1955 return;
1956 }
1957 portno = *pval;
1958 if (portno >= ppc4xx_pciex_port_count) {
1959 printk(KERN_ERR "PCIE: port number out of range for %s\n",
1960 np->full_name);
1961 return;
1962 }
1963 port = &ppc4xx_pciex_ports[portno];
1964 port->index = portno;
1965
1966 /*
1967 * Check if device is enabled
1968 */
1969 if (!of_device_is_available(np)) {
1970 printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
1971 return;
1972 }
1973
1974 port->node = of_node_get(np);
1975 pval = of_get_property(np, "sdr-base", NULL);
1976 if (pval == NULL) {
1977 printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
1978 np->full_name);
1979 return;
1980 }
1981 port->sdr_base = *pval;
1982
1983 /* Check if device_type property is set to "pci" or "pci-endpoint".
1984 * Depending on this setting, the PCIe port will be configured
1985 * as a root-complex or as an endpoint.
1986 */
1987 val = of_get_property(port->node, "device_type", NULL);
1988 if (val && !strcmp(val, "pci-endpoint")) {
1989 port->endpoint = 1;
1990 } else if (val && !strcmp(val, "pci")) {
1991 port->endpoint = 0;
1992 } else {
1993 printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n",
1994 np->full_name);
1995 return;
1996 }
1997
1998 /* Fetch config space registers address */
1999 if (of_address_to_resource(np, 0, &port->cfg_space)) {
2000 printk(KERN_ERR "%s: Can't get PCI-E config space !",
2001 np->full_name);
2002 return;
2003 }
2004 /* Fetch host bridge internal registers address */
2005 if (of_address_to_resource(np, 1, &port->utl_regs)) {
2006 printk(KERN_ERR "%s: Can't get UTL register base !",
2007 np->full_name);
2008 return;
2009 }
2010
2011 /* Map DCRs */
2012 dcrs = dcr_resource_start(np, 0);
2013 if (dcrs == 0) {
2014 printk(KERN_ERR "%s: Can't get DCR register base !",
2015 np->full_name);
2016 return;
2017 }
2018 port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
2019
2020 /* Initialize the port specific registers */
2021 if (ppc4xx_pciex_port_init(port)) {
2022 printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
2023 return;
2024 }
2025
2026 /* Setup the linux hose data structure */
2027 ppc4xx_pciex_port_setup_hose(port);
2028 }
2029
2030 #endif /* CONFIG_PPC4xx_PCI_EXPRESS */
2031
2032 static int __init ppc4xx_pci_find_bridges(void)
2033 {
2034 struct device_node *np;
2035
2036 pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
2037
2038 #ifdef CONFIG_PPC4xx_PCI_EXPRESS
2039 for_each_compatible_node(np, NULL, "ibm,plb-pciex")
2040 ppc4xx_probe_pciex_bridge(np);
2041 #endif
2042 for_each_compatible_node(np, NULL, "ibm,plb-pcix")
2043 ppc4xx_probe_pcix_bridge(np);
2044 for_each_compatible_node(np, NULL, "ibm,plb-pci")
2045 ppc4xx_probe_pci_bridge(np);
2046
2047 return 0;
2048 }
2049 arch_initcall(ppc4xx_pci_find_bridges);
2050