/*
 * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by: Nicolas Pitre, May 2012
 * Copyright:  (C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>
#include <linux/arm-cci.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>


#define RST_HOLD0	0x0
#define RST_HOLD1	0x4
#define SYS_SWRESET	0x8
#define RST_STAT0	0xc
#define RST_STAT1	0x10
#define EAG_CFG_R	0x20
#define EAG_CFG_W	0x24
#define KFC_CFG_R	0x28
#define KFC_CFG_W	0x2c
#define DCS_CFG_R	0x30

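/*
 * Layout of the RST_HOLDx registers, one per cluster, as inferred from
 * the code below: bits [3:0] and [7:4] hold individual CPUs in reset,
 * and bit 8 holds the entire cluster in reset.
 */
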
/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() while its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void __iomem *dcscb_base;
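/*
 * Per-CPU use counts, indexed as [cpu][cluster]: see dcscb_power_up()
 * for the meaning of the values. dcscb_allcpus_mask holds, for each
 * cluster, the mask of CPUs physically present (derived from DCS_CFG_R
 * at init time).
 */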
static int dcscb_use_count[4][2];
static int dcscb_allcpus_mask[2];

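/*
 * MCPM .power_up callback: release the given CPU (and, if need be, its
 * whole cluster) from reset. Called on a CPU that is already alive.
 */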
static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned int rst_hold, cpumask = (1 << cpu);
	unsigned int all_mask;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))
		return -EINVAL;

	all_mask = dcscb_allcpus_mask[cluster];

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&dcscb_lock);

	dcscb_use_count[cpu][cluster]++;
	if (dcscb_use_count[cpu][cluster] == 1) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		if (rst_hold & (1 << 8)) {
			/* remove cluster reset and add individual CPU's reset */
			rst_hold &= ~(1 << 8);
			rst_hold |= all_mask;
		}
		rst_hold &= ~(cpumask | (cpumask << 4));
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&dcscb_lock);
	local_irq_enable();

	return 0;
}

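/*
 * MCPM .power_down callback: runs on the very CPU that is going down.
 * If that CPU is the last one alive in its cluster, the whole cluster
 * is put back into reset and cluster-level coherency is torn down.
 */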
static void dcscb_power_down(void)
{
	unsigned int mpidr, cpu, cluster, rst_hold, cpumask, all_mask;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	cpumask = (1 << cpu);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]));

	all_mask = dcscb_allcpus_mask[cluster];

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&dcscb_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	dcscb_use_count[cpu][cluster]--;
	if (dcscb_use_count[cpu][cluster] == 0) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		rst_hold |= cpumask;
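		/*
		 * Fold bits [7:4] onto [3:0] to get one bit per CPU held in
		 * reset: if that now covers every CPU in the cluster, this
		 * CPU is the last man, so assert the cluster-wide reset too.
		 */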
		if (((rst_hold | (rst_hold >> 4)) & all_mask) == all_mask) {
			rst_hold |= (1 << 8);
			last_man = true;
		}
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted. So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&dcscb_lock);

		/* Flush all cache levels for this cluster. */
		v7_exit_coherency_flush(all);

		/*
		 * A full outer cache flush could be needed at this point
		 * on platforms with such a cache, depending on where the
		 * outer cache sits. In some cases the notion of a "last
		 * cluster standing" would need to be implemented if the
		 * outer cache is shared across clusters. In any case, when
		 * the outer cache needs flushing, there is no concurrent
		 * access to the cache controller to worry about and no
		 * special locking besides what is already provided by the
		 * MCPM state machinery is needed.
		 */

		/*
		 * Disable cluster-level coherency by masking
		 * incoming snoops and DVM messages:
		 */
		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		arch_spin_unlock(&dcscb_lock);

		/* Disable and flush the local CPU cache. */
		v7_exit_coherency_flush(louis);
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	dsb();
	if (!skip_wfi)
		wfi();

	/* Not dead at this point? Let our caller cope. */
}

static const struct mcpm_platform_ops dcscb_power_ops = {
	.power_up	= dcscb_power_up,
	.power_down	= dcscb_power_down,
};

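/*
 * The CPU running this init code is already up: give it an initial use
 * count of 1. All other counters start at 0 (CPU down).
 */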
static void __init dcscb_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));
	dcscb_use_count[cpu][cluster] = 1;
}

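/* Early power-up setup hook, implemented in assembly (dcscb_setup.S). */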
extern void dcscb_power_up_setup(unsigned int affinity_level);

static int __init dcscb_init(void)
{
	struct device_node *node;
	unsigned int cfg;
	int ret;

	if (!cci_probed())
		return -ENODEV;

	node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
	if (!node)
		return -ENODEV;
	dcscb_base = of_iomap(node, 0);
	if (!dcscb_base)
		return -EADDRNOTAVAIL;
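	/*
	 * DCS_CFG_R encodes the number of CPUs in each cluster as a 4-bit
	 * field per cluster starting at bit 16; turn each count into a
	 * mask of the CPUs present in that cluster.
	 */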
	cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
	dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
	dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
	dcscb_usage_count_init();

	ret = mcpm_platform_register(&dcscb_power_ops);
	if (!ret)
		ret = mcpm_sync_init(dcscb_power_up_setup);
	if (ret) {
		iounmap(dcscb_base);
		return ret;
	}

	pr_info("VExpress DCSCB support installed\n");

	/*
	 * Future entries into the kernel can now go
	 * through the cluster entry vectors.
	 */
	vexpress_flags_set(virt_to_phys(mcpm_entry_point));

	return 0;
}

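/*
 * Use an early initcall: these run before smp_init(), so the MCPM
 * backend is registered before any secondary CPU is brought up.
 */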
early_initcall(dcscb_init);