/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu capacity scale management
 */

/*
 * cpu capacity table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_capacity field so the scheduler
 * can take this difference into account during load balancing. A per cpu
 * structure is preferred because each CPU updates its own cpu_capacity field
 * during load balancing, except for idle cores: one idle core is selected
 * to run rebalance_domains for all idle cores, and their cpu_capacity can
 * be updated during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
        return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
        per_cpu(cpu_scale, cpu) = capacity;
}

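/*
 * For reference: in kernels of this vintage, SCHED_CAPACITY_SHIFT is 10,
 * so SCHED_CAPACITY_SCALE is 1024 and denotes a CPU of 'average' capacity.
 * A cpu_scale of 1536 would tell the scheduler to treat that CPU as
 * roughly 1.5 times faster when comparing load between runqueues.
 */
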
#ifdef CONFIG_OF
struct cpu_efficiency {
        const char *compatible;
        unsigned long efficiency;
};

/*
 * Table of the relative efficiency of each processor.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table
 * use the default SCHED_CAPACITY_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
        {"arm,cortex-a15", 3891},
        {"arm,cortex-a7",  2048},
        {NULL, },
};
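
/*
 * The absolute efficiency numbers are meaningless by themselves; only
 * their ratio matters: a Cortex-A15 is taken to deliver 3891/2048, i.e.
 * roughly 1.9 times the per-clock capacity of a Cortex-A7.
 */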

static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)       __cpu_capacity[cpu]

static unsigned long middle_capacity = 1;

/*
 * Iterate over all CPUs' descriptors in the DT and compute each CPU's
 * efficiency (as per table_efficiency). Also calculate a middle
 * efficiency, as close as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_capacity field such that an
 * 'average' CPU is of middle capacity. Also see the comments near
 * table_efficiency[] and update_cpu_capacity().
 */
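/*
 * For illustration, a hypothetical DT fragment that this parser would
 * consume; only the compatible string and the clock-frequency property
 * matter here:
 *
 *      cpu@0 {
 *              device_type = "cpu";
 *              compatible = "arm,cortex-a15";
 *              reg = <0>;
 *              clock-frequency = <1000000000>;
 *      };
 */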
static void __init parse_dt_topology(void)
{
        const struct cpu_efficiency *cpu_eff;
        struct device_node *cn = NULL;
        unsigned long min_capacity = ULONG_MAX;
        unsigned long max_capacity = 0;
        unsigned long capacity = 0;
        int cpu = 0;

        __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
                                 GFP_NOWAIT);
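        /*
         * GFP_NOWAIT: presumably because this runs from early boot code
         * (see init_cpu_topology() below), where the allocation must not
         * sleep.
         */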

        for_each_possible_cpu(cpu) {
                const u32 *rate;
                int len;

                /* too early to use cpu->of_node */
                cn = of_get_cpu_node(cpu, NULL);
                if (!cn) {
                        pr_err("missing device node for CPU %d\n", cpu);
                        continue;
                }

                for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
                        if (of_device_is_compatible(cn, cpu_eff->compatible))
                                break;

                if (cpu_eff->compatible == NULL)
                        continue;

                rate = of_get_property(cn, "clock-frequency", &len);
                if (!rate || len != 4) {
                        pr_err("%s missing clock-frequency property\n",
                               cn->full_name);
                        continue;
                }

                capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
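
                /*
                 * Worked example with the table above: a Cortex-A15 at
                 * 1 GHz gives (1000000000 >> 20) * 3891 = 953 * 3891 =
                 * 3708123, and a Cortex-A7 at 1 GHz gives 953 * 2048 =
                 * 1951744. These raw values only matter relative to each
                 * other; they are normalized by middle_capacity below.
                 */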

                /* Save min capacity of the system */
                if (capacity < min_capacity)
                        min_capacity = capacity;

                /* Save max capacity of the system */
                if (capacity > max_capacity)
                        max_capacity = capacity;

                cpu_capacity(cpu) = capacity;
        }

        /* If the min and max capacities are equal, we bypass the update of
         * cpu_scale because all CPUs have the same capacity. Otherwise, we
         * compute a middle_capacity factor that will ensure that the capacity
         * of an 'average' CPU of the system is as close as possible to
         * SCHED_CAPACITY_SCALE, which is the default value, subject to the
         * constraint explained near table_efficiency[].
         */
        if (4*max_capacity < (3*(max_capacity + min_capacity)))
                middle_capacity = (min_capacity + max_capacity)
                                >> (SCHED_CAPACITY_SHIFT+1);
        else
                middle_capacity = ((max_capacity / 3)
                                >> (SCHED_CAPACITY_SHIFT-1)) + 1;
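
        /*
         * The test above reduces to max_capacity < 3 * min_capacity. Within
         * that spread, middle_capacity is the average of min and max scaled
         * down by SCHED_CAPACITY_SCALE (hence the extra shift), which keeps
         * the fastest CPU's cpu_scale below 3*SCHED_CAPACITY_SCALE/2. Beyond
         * it, middle_capacity is roughly max_capacity/1536, clamping the
         * fastest CPU close to that same upper bound.
         */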
}

/*
 * Look up the custom capacity of a CPU in the cpu_capacity table during
 * boot. CPUs for which no capacity was computed (the common homogeneous
 * SMP case) keep the default cpu_scale, and the function returns
 * immediately for them.
 */
static void update_cpu_capacity(unsigned int cpu)
{
        if (!cpu_capacity(cpu))
                return;

        set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);

        pr_info("CPU%u: update cpu_capacity %lu\n",
                cpu, arch_scale_cpu_capacity(NULL, cpu));
}

#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_sibling;
}

/*
 * The current assumption is that we can power gate each core independently.
 * This will be superseded by a DT binding once available.
 */
const struct cpumask *cpu_corepower_mask(int cpu)
{
        return &cpu_topology[cpu].thread_sibling;
}

static void update_siblings_masks(unsigned int cpuid)
{
        struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->socket_id != cpu_topo->socket_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
        smp_wmb();
}

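/*
 * For example, on a hypothetical two-cluster system with CPUs 0-1 in
 * cluster 0 and CPUs 2-3 in cluster 1, and no SMT, the loop above leaves
 * core_sibling == {0,1} on CPUs 0-1 and {2,3} on CPUs 2-3, while each
 * thread_sibling mask holds only the CPU itself.
 */
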
/*
 * store_cpu_topology is called at boot when only one CPU is running and,
 * once several CPUs have booted, with the cpu_hotplug.lock mutex held,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void store_cpu_topology(unsigned int cpuid)
{
        struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
        unsigned int mpidr;

        /* If the cpu topology has already been set, just return */
        if (cpuid_topo->core_id != -1)
                return;

        mpidr = read_cpuid_mpidr();

        /* create cpu topology mapping */
        if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
                /*
                 * This is a multiprocessor system:
                 * both the multiprocessor format and the multiprocessor
                 * mode field are set.
                 */

                if (mpidr & MPIDR_MT_BITMASK) {
                        /* core performance interdependency */
                        cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
                } else {
                        /* largely independent cores */
                        cpuid_topo->thread_id = -1;
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                }
        } else {
                /*
                 * This is a uniprocessor system:
                 * either we are in multiprocessor format but on a
                 * uniprocessor system, or we are in the old uniprocessor
                 * format.
                 */
                cpuid_topo->thread_id = -1;
                cpuid_topo->core_id = 0;
                cpuid_topo->socket_id = -1;
        }
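
        /*
         * Worked example: on a big.LITTLE part without the MT bit set, an
         * MPIDR of 0x80000101 decodes as Aff0 == 1 and Aff1 == 1, i.e.
         * core 1 of cluster (socket) 1, with no SMT thread level.
         */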

        update_siblings_masks(cpuid);

        update_cpu_capacity(cpuid);

        pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
                cpuid, cpu_topology[cpuid].thread_id,
                cpu_topology[cpuid].core_id,
                cpu_topology[cpuid].socket_id, mpidr);
}

static inline int cpu_corepower_flags(void)
{
        return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}

static struct sched_domain_topology_level arm_topology[] = {
#ifdef CONFIG_SCHED_MC
        { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};
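
/*
 * The levels are ordered from the most fine-grained sharing domain (the
 * power-gated core group) out to the whole die; set_sched_topology()
 * expects this innermost-to-outermost ordering.
 */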

/*
 * init_cpu_topology is called at boot when only one CPU is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
        unsigned int cpu;

        /* init core mask and capacity */
        for_each_possible_cpu(cpu) {
                struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = -1;
                cpu_topo->socket_id = -1;
                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
        }
        smp_wmb();

        parse_dt_topology();

        /* Set scheduler topology descriptor */
        set_sched_topology(arm_topology);
}