/*
 * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by: Nicolas Pitre, May 2012
 * Copyright:  (C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>
#include <linux/arm-cci.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

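/*
 * DCSCB register offsets. RST_HOLD0/1 hold individual CPUs (and the
 * cluster as a whole) in reset for clusters 0 and 1; DCS_CFG_R reports
 * the configuration, including the number of cores per cluster (see
 * dcscb_init() below).
 */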
#define RST_HOLD0	0x0
#define RST_HOLD1	0x4
#define SYS_SWRESET	0x8
#define RST_STAT0	0xc
#define RST_STAT1	0x10
#define EAG_CFG_R	0x20
#define EAG_CFG_W	0x24
#define KFC_CFG_R	0x28
#define KFC_CFG_W	0x2c
#define DCS_CFG_R	0x30

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() while its inbound counterpart
 * is already live using the same logical CPU number, which trips lockdep
 * debugging.
 */
static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void __iomem *dcscb_base;
static int dcscb_use_count[4][2];
static int dcscb_allcpus_mask[2];

static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned int rst_hold, cpumask = (1 << cpu);
	unsigned int all_mask = dcscb_allcpus_mask[cluster];

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cpu >= 4 || cluster >= 2)
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&dcscb_lock);

	dcscb_use_count[cpu][cluster]++;
	if (dcscb_use_count[cpu][cluster] == 1) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		if (rst_hold & (1 << 8)) {
			/* remove cluster reset and add individual CPU's reset */
			rst_hold &= ~(1 << 8);
			rst_hold |= all_mask;
		}
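		/*
		 * Deassert this CPU's reset. Clearing both bit positions
		 * suggests RST_HOLDx carries two 4-bit per-CPU reset
		 * fields (bits [3:0] and [7:4]); the exact signal names
		 * are not spelled out here.
		 */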
		rst_hold &= ~(cpumask | (cpumask << 4));
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&dcscb_lock);
	local_irq_enable();

	return 0;
}

static void dcscb_power_down(void)
{
	unsigned int mpidr, cpu, cluster, rst_hold, cpumask, all_mask;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	cpumask = (1 << cpu);
	all_mask = dcscb_allcpus_mask[cluster];

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&dcscb_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	dcscb_use_count[cpu][cluster]--;
	if (dcscb_use_count[cpu][cluster] == 0) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		rst_hold |= cpumask;
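		/*
		 * Last-man check: if every CPU in the cluster is now held
		 * by at least one of the two reset fields, the cluster as
		 * a whole can be put into reset too.
		 */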
		if (((rst_hold | (rst_hold >> 4)) & all_mask) == all_mask) {
			rst_hold |= (1 << 8);
			last_man = true;
		}
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * had been aborted. So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&dcscb_lock);

		/*
		 * Flush all cache levels for this cluster.
		 *
		 * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
		 * a preliminary flush here for those CPUs. At least, that's
		 * the theory -- without the extra flush, Linux explodes on
		 * RTSM (to be investigated).
		 */
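		/*
		 * Flush twice around disabling the cache: the first flush
		 * pushes out dirty lines while the cache is still on,
		 * clearing SCTLR.C stops new allocations, and the second
		 * flush catches lines dirtied in between.
		 */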
		flush_cache_all();
		set_cr(get_cr() & ~CR_C);
		flush_cache_all();

		/*
		 * This is a harmless no-op. On platforms with a real
		 * outer cache this might either be needed or not,
		 * depending on where the outer cache sits.
		 */
		outer_flush_all();

		/* Disable local coherency by clearing the ACTLR "SMP" bit: */
		set_auxcr(get_auxcr() & ~(1 << 6));

		/*
		 * Disable cluster-level coherency by masking
		 * incoming snoops and DVM messages:
		 */
		cci_disable_port_by_cpu(mpidr);

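		/*
		 * The cluster is now fully out of the coherency domain,
		 * so it is safe to declare it down and let a concurrent
		 * inbound cluster proceed.
		 */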
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		arch_spin_unlock(&dcscb_lock);

		/*
		 * Flush the local CPU cache.
		 *
		 * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
		 * a preliminary flush here for those CPUs. At least, that's
		 * the theory -- without the extra flush, Linux explodes on
		 * RTSM (to be investigated).
		 */
		flush_cache_louis();
		set_cr(get_cr() & ~CR_C);
		flush_cache_louis();

		/* Disable local coherency by clearing the ACTLR "SMP" bit: */
		set_auxcr(get_auxcr() & ~(1 << 6));
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	dsb();
	if (!skip_wfi)
		wfi();

	/* Not dead at this point? Let our caller cope. */
}

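/* Callbacks invoked by the MCPM core (mcpm_cpu_power_up/power_down). */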
static const struct mcpm_platform_ops dcscb_power_ops = {
	.power_up	= dcscb_power_up,
	.power_down	= dcscb_power_down,
};

static void __init dcscb_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);
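	/* The CPU running this code is necessarily alive: account for it. */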
	dcscb_use_count[cpu][cluster] = 1;
}

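/*
 * First-man setup hook, run out of reset with no stack and the MMU off;
 * implemented in assembly (see dcscb_setup.S in this directory).
 */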
extern void dcscb_power_up_setup(unsigned int affinity_level);

static int __init dcscb_init(void)
{
	struct device_node *node;
	unsigned int cfg;
	int ret;

	if (!cci_probed())
		return -ENODEV;

	node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
	if (!node)
		return -ENODEV;
	dcscb_base = of_iomap(node, 0);
	if (!dcscb_base)
		return -EADDRNOTAVAIL;
	cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
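	/*
	 * DCS_CFG_R encodes the number of cores per cluster in two 4-bit
	 * fields starting at bit 16 (cluster 0 in [19:16], cluster 1 in
	 * [23:20], as implied by the shifts below); turn each count into
	 * an all-CPUs bitmask.
	 */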
	dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
	dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
	dcscb_usage_count_init();

	ret = mcpm_platform_register(&dcscb_power_ops);
	if (!ret)
		ret = mcpm_sync_init(dcscb_power_up_setup);
	if (ret) {
		iounmap(dcscb_base);
		return ret;
	}

	pr_info("VExpress DCSCB support installed\n");

	/*
	 * Future entries into the kernel can now go
	 * through the cluster entry vectors.
	 */
	vexpress_flags_set(virt_to_phys(mcpm_entry_point));

	return 0;
}

early_initcall(dcscb_init);