/*
 * Versatile Express Core Tile Cortex A9x4 Support
 */
4 #include <linux/init.h>
6 #include <linux/device.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/platform_device.h>
9 #include <linux/amba/bus.h>
10 #include <linux/amba/clcd.h>
11 #include <linux/clkdev.h>
13 #include <asm/hardware/arm_timer.h>
14 #include <asm/hardware/cache-l2x0.h>
15 #include <asm/hardware/gic.h>
17 #include <asm/smp_scu.h>
18 #include <asm/smp_twd.h>
20 #include <mach/ct-ca9x4.h>
22 #include <asm/hardware/timer-sp.h>
24 #include <asm/mach/map.h>
25 #include <asm/mach/time.h>
29 #include <mach/motherboard.h>
31 #include <plat/clcd.h>
33 static struct map_desc ct_ca9x4_io_desc
[] __initdata
= {
35 .virtual = V2T_PERIPH
,
36 .pfn
= __phys_to_pfn(CT_CA9X4_MPIC
),
42 static void __init
ct_ca9x4_map_io(void)
44 iotable_init(ct_ca9x4_io_desc
, ARRAY_SIZE(ct_ca9x4_io_desc
));
#ifdef CONFIG_HAVE_ARM_TWD
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);

/*
 * Register the per-CPU TWD local timer. Failure is logged but not fatal:
 * the system can still run off the global timer.
 */
static void __init ca9x4_twd_init(void)
{
	int err = twd_local_timer_register(&twd_local_timer);
	if (err)
		pr_err("twd_local_timer_register failed %d\n", err);
}
#else
#define ca9x4_twd_init()	do {} while(0)
#endif
60 static void __init
ct_ca9x4_init_irq(void)
62 gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST
, SZ_4K
),
63 ioremap(A9_MPCORE_GIC_CPU
, SZ_256
));
67 static void ct_ca9x4_clcd_enable(struct clcd_fb
*fb
)
69 u32 site
= v2m_get_master_site();
72 * Old firmware was using the "site" component of the command
73 * to control the DVI muxer (while it should be always 0 ie. MB).
74 * Newer firmware uses the data register. Keep both for compatibility.
76 v2m_cfg_write(SYS_CFG_MUXFPGA
| SYS_CFG_SITE(site
), site
);
77 v2m_cfg_write(SYS_CFG_DVIMODE
| SYS_CFG_SITE(SYS_CFG_SITE_MB
), 2);
80 static int ct_ca9x4_clcd_setup(struct clcd_fb
*fb
)
82 unsigned long framesize
= 1024 * 768 * 2;
84 fb
->panel
= versatile_clcd_get_panel("XVGA");
88 return versatile_clcd_setup_dma(fb
, framesize
);
91 static struct clcd_board ct_ca9x4_clcd_data
= {
93 .caps
= CLCD_CAP_5551
| CLCD_CAP_565
,
94 .check
= clcdfb_check
,
95 .decode
= clcdfb_decode
,
96 .enable
= ct_ca9x4_clcd_enable
,
97 .setup
= ct_ca9x4_clcd_setup
,
98 .mmap
= versatile_clcd_mmap_dma
,
99 .remove
= versatile_clcd_remove_dma
,
102 static AMBA_AHB_DEVICE(clcd
, "ct:clcd", 0, CT_CA9X4_CLCDC
, IRQ_CT_CA9X4_CLCDC
, &ct_ca9x4_clcd_data
);
103 static AMBA_APB_DEVICE(dmc
, "ct:dmc", 0, CT_CA9X4_DMC
, IRQ_CT_CA9X4_DMC
, NULL
);
104 static AMBA_APB_DEVICE(smc
, "ct:smc", 0, CT_CA9X4_SMC
, IRQ_CT_CA9X4_SMC
, NULL
);
105 static AMBA_APB_DEVICE(gpio
, "ct:gpio", 0, CT_CA9X4_GPIO
, IRQ_CT_CA9X4_GPIO
, NULL
);
107 static struct amba_device
*ct_ca9x4_amba_devs
[] __initdata
= {
115 static struct v2m_osc ct_osc1
= {
117 .rate_min
= 10000000,
118 .rate_max
= 80000000,
119 .rate_default
= 23750000,
122 static struct resource pmu_resources
[] = {
124 .start
= IRQ_CT_CA9X4_PMU_CPU0
,
125 .end
= IRQ_CT_CA9X4_PMU_CPU0
,
126 .flags
= IORESOURCE_IRQ
,
129 .start
= IRQ_CT_CA9X4_PMU_CPU1
,
130 .end
= IRQ_CT_CA9X4_PMU_CPU1
,
131 .flags
= IORESOURCE_IRQ
,
134 .start
= IRQ_CT_CA9X4_PMU_CPU2
,
135 .end
= IRQ_CT_CA9X4_PMU_CPU2
,
136 .flags
= IORESOURCE_IRQ
,
139 .start
= IRQ_CT_CA9X4_PMU_CPU3
,
140 .end
= IRQ_CT_CA9X4_PMU_CPU3
,
141 .flags
= IORESOURCE_IRQ
,
145 static struct platform_device pmu_device
= {
147 .id
= ARM_PMU_DEVICE_CPU
,
148 .num_resources
= ARRAY_SIZE(pmu_resources
),
149 .resource
= pmu_resources
,
152 static void __init
ct_ca9x4_init(void)
157 #ifdef CONFIG_CACHE_L2X0
158 void __iomem
*l2x0_base
= ioremap(CT_CA9X4_L2CC
, SZ_4K
);
160 /* set RAM latencies to 1 cycle for this core tile. */
161 writel(0, l2x0_base
+ L2X0_TAG_LATENCY_CTRL
);
162 writel(0, l2x0_base
+ L2X0_DATA_LATENCY_CTRL
);
164 l2x0_init(l2x0_base
, 0x00400000, 0xfe0fffff);
167 ct_osc1
.site
= v2m_get_master_site();
168 clk
= v2m_osc_register("ct:osc1", &ct_osc1
);
169 clk_register_clkdev(clk
, NULL
, "ct:clcd");
171 for (i
= 0; i
< ARRAY_SIZE(ct_ca9x4_amba_devs
); i
++)
172 amba_device_register(ct_ca9x4_amba_devs
[i
], &iomem_resource
);
174 platform_device_register(&pmu_device
);
178 static void *ct_ca9x4_scu_base __initdata
;
180 static void __init
ct_ca9x4_init_cpu_map(void)
184 ct_ca9x4_scu_base
= ioremap(A9_MPCORE_SCU
, SZ_128
);
185 if (WARN_ON(!ct_ca9x4_scu_base
))
188 ncores
= scu_get_core_count(ct_ca9x4_scu_base
);
190 if (ncores
> nr_cpu_ids
) {
191 pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
196 for (i
= 0; i
< ncores
; ++i
)
197 set_cpu_possible(i
, true);
199 set_smp_cross_call(gic_raise_softirq
);
202 static void __init
ct_ca9x4_smp_enable(unsigned int max_cpus
)
204 scu_enable(ct_ca9x4_scu_base
);
208 struct ct_desc ct_ca9x4_desc __initdata
= {
211 .map_io
= ct_ca9x4_map_io
,
212 .init_irq
= ct_ca9x4_init_irq
,
213 .init_tile
= ct_ca9x4_init
,
215 .init_cpu_map
= ct_ca9x4_init_cpu_map
,
216 .smp_enable
= ct_ca9x4_smp_enable
,