/*
 * Versatile Express Core Tile Cortex A9x4 Support
 */
4 #include <linux/init.h>
6 #include <linux/device.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/platform_device.h>
9 #include <linux/amba/bus.h>
10 #include <linux/amba/clcd.h>
11 #include <linux/platform_data/video-clcd-versatile.h>
12 #include <linux/clkdev.h>
13 #include <linux/vexpress.h>
14 #include <linux/irqchip/arm-gic.h>
16 #include <asm/hardware/arm_timer.h>
17 #include <asm/hardware/cache-l2x0.h>
18 #include <asm/smp_scu.h>
19 #include <asm/smp_twd.h>
21 #include <mach/ct-ca9x4.h>
23 #include <asm/hardware/timer-sp.h>
25 #include <asm/mach/map.h>
26 #include <asm/mach/time.h>
30 #include <mach/motherboard.h>
31 #include <mach/irqs.h>
33 static struct map_desc ct_ca9x4_io_desc
[] __initdata
= {
35 .virtual = V2T_PERIPH
,
36 .pfn
= __phys_to_pfn(CT_CA9X4_MPIC
),
42 static void __init
ct_ca9x4_map_io(void)
44 iotable_init(ct_ca9x4_io_desc
, ARRAY_SIZE(ct_ca9x4_io_desc
));
47 static void __init
ca9x4_l2_init(void)
49 #ifdef CONFIG_CACHE_L2X0
50 void __iomem
*l2x0_base
= ioremap(CT_CA9X4_L2CC
, SZ_4K
);
53 /* set RAM latencies to 1 cycle for this core tile. */
54 writel(0, l2x0_base
+ L310_TAG_LATENCY_CTRL
);
55 writel(0, l2x0_base
+ L310_DATA_LATENCY_CTRL
);
57 l2x0_init(l2x0_base
, 0x00400000, 0xfe0fffff);
59 pr_err("L2C: unable to map L2 cache controller\n");
#ifdef CONFIG_HAVE_ARM_TWD
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);

/* Register the per-CPU TWD local timer; failure is non-fatal (logged only). */
static void __init ca9x4_twd_init(void)
{
	int err = twd_local_timer_register(&twd_local_timer);
	if (err)
		pr_err("twd_local_timer_register failed %d\n", err);
}
#else
#define ca9x4_twd_init()	do {} while(0)
#endif
77 static void __init
ct_ca9x4_init_irq(void)
79 gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST
, SZ_4K
),
80 ioremap(A9_MPCORE_GIC_CPU
, SZ_256
));
85 static int ct_ca9x4_clcd_setup(struct clcd_fb
*fb
)
87 unsigned long framesize
= 1024 * 768 * 2;
89 fb
->panel
= versatile_clcd_get_panel("XVGA");
93 return versatile_clcd_setup_dma(fb
, framesize
);
96 static struct clcd_board ct_ca9x4_clcd_data
= {
98 .caps
= CLCD_CAP_5551
| CLCD_CAP_565
,
99 .check
= clcdfb_check
,
100 .decode
= clcdfb_decode
,
101 .setup
= ct_ca9x4_clcd_setup
,
102 .mmap
= versatile_clcd_mmap_dma
,
103 .remove
= versatile_clcd_remove_dma
,
106 static AMBA_AHB_DEVICE(clcd
, "ct:clcd", 0, CT_CA9X4_CLCDC
, IRQ_CT_CA9X4_CLCDC
, &ct_ca9x4_clcd_data
);
107 static AMBA_APB_DEVICE(dmc
, "ct:dmc", 0, CT_CA9X4_DMC
, IRQ_CT_CA9X4_DMC
, NULL
);
108 static AMBA_APB_DEVICE(smc
, "ct:smc", 0, CT_CA9X4_SMC
, IRQ_CT_CA9X4_SMC
, NULL
);
109 static AMBA_APB_DEVICE(gpio
, "ct:gpio", 0, CT_CA9X4_GPIO
, IRQ_CT_CA9X4_GPIO
, NULL
);
111 static struct amba_device
*ct_ca9x4_amba_devs
[] __initdata
= {
118 static struct resource pmu_resources
[] = {
120 .start
= IRQ_CT_CA9X4_PMU_CPU0
,
121 .end
= IRQ_CT_CA9X4_PMU_CPU0
,
122 .flags
= IORESOURCE_IRQ
,
125 .start
= IRQ_CT_CA9X4_PMU_CPU1
,
126 .end
= IRQ_CT_CA9X4_PMU_CPU1
,
127 .flags
= IORESOURCE_IRQ
,
130 .start
= IRQ_CT_CA9X4_PMU_CPU2
,
131 .end
= IRQ_CT_CA9X4_PMU_CPU2
,
132 .flags
= IORESOURCE_IRQ
,
135 .start
= IRQ_CT_CA9X4_PMU_CPU3
,
136 .end
= IRQ_CT_CA9X4_PMU_CPU3
,
137 .flags
= IORESOURCE_IRQ
,
141 static struct platform_device pmu_device
= {
144 .num_resources
= ARRAY_SIZE(pmu_resources
),
145 .resource
= pmu_resources
,
148 static struct clk_lookup osc1_lookup
= {
152 static struct platform_device osc1_device
= {
153 .name
= "vexpress-osc",
156 .resource
= (struct resource
[]) {
157 VEXPRESS_RES_FUNC(0xf, 1),
159 .dev
.platform_data
= &osc1_lookup
,
162 static void __init
ct_ca9x4_init(void)
166 for (i
= 0; i
< ARRAY_SIZE(ct_ca9x4_amba_devs
); i
++)
167 amba_device_register(ct_ca9x4_amba_devs
[i
], &iomem_resource
);
169 platform_device_register(&pmu_device
);
170 vexpress_syscfg_device_register(&osc1_device
);
174 static void *ct_ca9x4_scu_base __initdata
;
176 static void __init
ct_ca9x4_init_cpu_map(void)
180 ct_ca9x4_scu_base
= ioremap(A9_MPCORE_SCU
, SZ_128
);
181 if (WARN_ON(!ct_ca9x4_scu_base
))
184 ncores
= scu_get_core_count(ct_ca9x4_scu_base
);
186 if (ncores
> nr_cpu_ids
) {
187 pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
192 for (i
= 0; i
< ncores
; ++i
)
193 set_cpu_possible(i
, true);
196 static void __init
ct_ca9x4_smp_enable(unsigned int max_cpus
)
198 scu_enable(ct_ca9x4_scu_base
);
202 struct ct_desc ct_ca9x4_desc __initdata
= {
205 .map_io
= ct_ca9x4_map_io
,
206 .init_irq
= ct_ca9x4_init_irq
,
207 .init_tile
= ct_ca9x4_init
,
209 .init_cpu_map
= ct_ca9x4_init_cpu_map
,
210 .smp_enable
= ct_ca9x4_smp_enable
,