ARM: bL_switcher: remove assumptions between logical and physical CPUs
/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by:	Nicolas Pitre, March 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
	unsigned int id;
	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
	return id & MPIDR_HWID_BITMASK;
}
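
/*
 * Note on the MPIDR layout assumed throughout this file (dual-cluster
 * big.LITTLE systems): affinity level 0 (bits [7:0]) is the CPU number
 * within its cluster and affinity level 1 (bits [15:8]) is the cluster
 * number.  For example, an MPIDR of 0x103 denotes CPU 3 of cluster 1, so
 * MPIDR_AFFINITY_LEVEL(0x103, 0) == 3 and MPIDR_AFFINITY_LEVEL(0x103, 1) == 1.
 */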

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_unused)
{
	unsigned ib_mpidr, ib_cpu, ib_cluster;

	pr_debug("%s\n", __func__);

	ib_mpidr = cpu_logical_map(smp_processor_id());
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	/*
	 * Our state has been saved at this point. Let's release our
	 * inbound CPU.
	 */
	mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
	sev();

	/*
	 * From this point, we must assume that our counterpart CPU might
	 * have taken over in its parallel world already, as if execution
	 * just returned from cpu_suspend(). It is therefore important to
	 * be very careful not to make any change the other guy is not
	 * expecting. This is why we need stack isolation.
	 *
	 * Fancy under cover tasks could be performed here.  For now
	 * we have none.
	 */

	/* Let's put ourself down. */
	mcpm_cpu_power_down();

	/* should never get here */
	BUG();
}

/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
	unsigned int mpidr = read_mpidr();
	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	void *stack = current_thread_info() + 1;
	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
	stack += clusterid * STACK_SIZE + STACK_SIZE;
	call_with_stack(bL_do_switch, (void *)_arg, stack);
	BUG();
}
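
/*
 * Resulting layout of the borrowed stack area (stacks grow downwards, so
 * the pointer handed to call_with_stack() is the high end of the 512-byte
 * slot reserved for the current cluster):
 *
 *	thread_info
 *	<cache line alignment padding>
 *	[ 512 bytes: slot used when running on cluster 0 ]
 *	[ 512 bytes: slot used when running on cluster 1 ]
 *
 * Keeping the two clusters on disjoint slots is what lets the outbound and
 * inbound CPUs execute concurrently on the same task's stack.
 */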

/*
 * Generic switcher interface
 */

static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
	unsigned int mpidr, this_cpu, that_cpu;
	unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
	struct tick_device *tdev;
	enum clock_event_mode tdev_mode;
	int ret;

	this_cpu = smp_processor_id();
	ob_mpidr = read_mpidr();
	ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
	ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
	BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

	if (new_cluster_id == ob_cluster)
		return 0;

	that_cpu = bL_switcher_cpu_pairing[this_cpu];
	ib_mpidr = cpu_logical_map(that_cpu);
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
		 this_cpu, ob_mpidr, ib_mpidr);

	/* Close the gate for our entry vectors */
	mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
	mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

	/*
	 * Let's wake up the inbound CPU now in case it requires some delay
	 * to come online, but leave it gated in our entry vector code.
	 */
	ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
	if (ret) {
		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
		return ret;
	}

	/*
	 * From this point we are entering the switch critical zone
	 * and can't take any interrupts anymore.
	 */
	local_irq_disable();
	local_fiq_disable();

	/* redirect GIC's SGIs to our counterpart */
	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

	/*
	 * Raise a SGI on the inbound CPU to make sure it doesn't stall
	 * in a possible WFI, such as in mcpm_power_down().
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(this_cpu));

	tdev = tick_get_device(this_cpu);
	if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
		tdev = NULL;
	if (tdev) {
		tdev_mode = tdev->evtdev->mode;
		clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
	}

	ret = cpu_pm_enter();

	/* we can not tolerate errors at this point */
	if (ret)
		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

	/* Swap the physical CPUs in the logical map for this logical CPU. */
	cpu_logical_map(this_cpu) = ib_mpidr;
	cpu_logical_map(that_cpu) = ob_mpidr;

	/* Let's do the actual CPU switch. */
	ret = cpu_suspend(0, bL_switchpoint);
	if (ret > 0)
		panic("%s: cpu_suspend() returned %d\n", __func__, ret);

	/* We are executing on the inbound CPU at this point */
	mpidr = read_mpidr();
	pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
	BUG_ON(mpidr != ib_mpidr);

	mcpm_cpu_powered_up();

	ret = cpu_pm_exit();

	if (tdev) {
		clockevents_set_mode(tdev->evtdev, tdev_mode);
		clockevents_program_event(tdev->evtdev,
					  tdev->evtdev->next_event, 1);
	}

	local_fiq_enable();
	local_irq_enable();

	if (ret)
		pr_err("%s exiting with error %d\n", __func__, ret);
	return ret;
}
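
/*
 * To summarize the sequence performed above on the outbound CPU: power up
 * the (still gated) inbound CPU, mask IRQ/FIQ, migrate the GIC interface,
 * shut down the local tick device, call cpu_pm_enter(), swap the two
 * physical CPUs in the logical map, then cpu_suspend() into
 * bL_switchpoint().  Execution resumes on the inbound CPU through
 * cpu_resume(), which returns here from cpu_suspend() and undoes the above
 * in roughly reverse order while the outbound CPU powers itself down.
 */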

struct bL_thread {
	struct task_struct *task;
	wait_queue_head_t wq;
	int wanted_cluster;
	struct completion started;
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
	struct bL_thread *t = arg;
	struct sched_param param = { .sched_priority = 1 };
	int cluster;

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	complete(&t->started);

	do {
		if (signal_pending(current))
			flush_signals(current);
		wait_event_interruptible(t->wq,
				t->wanted_cluster != -1 ||
				kthread_should_stop());
		cluster = xchg(&t->wanted_cluster, -1);
		if (cluster != -1)
			bL_switch_to(cluster);
	} while (!kthread_should_stop());

	return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
	struct task_struct *task;

	task = kthread_create_on_node(bL_switcher_thread, arg,
				      cpu_to_node(cpu), "kswitcher_%d", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);
		wake_up_process(task);
	} else
		pr_err("%s failed for CPU %d\n", __func__, cpu);
	return task;
}

/*
 * bL_switch_request - Switch to a specific cluster for the given CPU
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread. This function may or may not return
 * before the switch has occurred.
 */
int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
{
	struct bL_thread *t;

	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return -EINVAL;
	}

	t = &bL_threads[cpu];
	if (IS_ERR(t->task))
		return PTR_ERR(t->task);
	if (!t->task)
		return -ESRCH;

	t->wanted_cluster = new_cluster_id;
	wake_up(&t->wq);
	return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request);
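
/*
 * A minimal, hypothetical usage sketch: a cpufreq or thermal driver that
 * wants logical CPU 0 to run on cluster 1 would do
 *
 *	ret = bL_switch_request(0, 1);
 *	if (ret)
 *		pr_warn("switch request for CPU0 failed: %d\n", ret);
 *
 * The call only queues the request with the per-CPU switcher thread; it
 * may return before the switch has actually taken place.
 */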

/*
 * Activation and configuration code.
 */

static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

static void bL_switcher_restore_cpus(void)
{
	int i;

	for_each_cpu(i, &bL_switcher_removed_logical_cpus)
		cpu_up(i);
}

static int bL_switcher_halve_cpus(void)
{
	int i, j, cluster_0, gic_id, ret;
	unsigned int cpu, cluster, mask;
	cpumask_t available_cpus;

	/* First pass to validate what we have */
	mask = 0;
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster >= 2) {
			pr_err("%s: only dual cluster systems are supported\n", __func__);
			return -EINVAL;
		}
		if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
			return -EINVAL;
		mask |= (1 << cluster);
	}
	if (mask != 3) {
		pr_err("%s: no CPU pairing possible\n", __func__);
		return -EINVAL;
	}

	/*
	 * Now let's do the pairing.  We match each CPU with another CPU
	 * from a different cluster.  To get a uniform scheduling behavior
	 * without fiddling with CPU topology and compute capacity data,
	 * we'll use logical CPUs initially belonging to the same cluster.
	 */
	memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
	cpumask_copy(&available_cpus, cpu_online_mask);
	cluster_0 = -1;
	for_each_cpu(i, &available_cpus) {
		int match = -1;
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster_0 == -1)
			cluster_0 = cluster;
		if (cluster != cluster_0)
			continue;
		cpumask_clear_cpu(i, &available_cpus);
		for_each_cpu(j, &available_cpus) {
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
			/*
			 * Let's remember the last match to create "odd"
			 * pairings on purpose in order for other code not
			 * to assume any relation between physical and
			 * logical CPU numbers.
			 */
			if (cluster != cluster_0)
				match = j;
		}
		if (match != -1) {
			bL_switcher_cpu_pairing[i] = match;
			cpumask_clear_cpu(match, &available_cpus);
			pr_info("CPU%d paired with CPU%d\n", i, match);
		}
	}

	/*
	 * Now we disable the unwanted CPUs i.e. everything that has no
	 * pairing information (that includes the pairing counterparts).
	 */
	cpumask_clear(&bL_switcher_removed_logical_cpus);
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

		/* Let's take note of the GIC ID for this CPU */
		gic_id = gic_get_cpu_id(i);
		if (gic_id < 0) {
			pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
			bL_switcher_restore_cpus();
			return -EINVAL;
		}
		bL_gic_id[cpu][cluster] = gic_id;
		pr_info("GIC ID for CPU %u cluster %u is %u\n",
			cpu, cluster, gic_id);

		if (bL_switcher_cpu_pairing[i] != -1) {
			bL_switcher_cpu_original_cluster[i] = cluster;
			continue;
		}

		ret = cpu_down(i);
		if (ret) {
			bL_switcher_restore_cpus();
			return ret;
		}
		cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
	}

	return 0;
}
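
/*
 * To illustrate the pairing with a hypothetical dual-cluster setup where
 * logical CPUs 0 and 1 sit on cluster 0 and logical CPUs 2 and 3 sit on
 * cluster 1: CPU0 is paired with CPU3 (the *last* remaining CPU of the
 * other cluster) and CPU1 with CPU2.  CPUs 2 and 3 are then hot-unplugged,
 * leaving logical CPUs 0 and 1 free to migrate between the two clusters.
 * The deliberately "crossed" pairing keeps other code from assuming any
 * fixed relation between logical and physical CPU numbers.
 */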

static int bL_switcher_enable(void)
{
	int cpu, ret;

	cpu_hotplug_driver_lock();
	if (bL_switcher_active) {
		cpu_hotplug_driver_unlock();
		return 0;
	}

	pr_info("big.LITTLE switcher initializing\n");

	ret = bL_switcher_halve_cpus();
	if (ret) {
		cpu_hotplug_driver_unlock();
		return ret;
	}

	for_each_online_cpu(cpu) {
		struct bL_thread *t = &bL_threads[cpu];
		init_waitqueue_head(&t->wq);
		init_completion(&t->started);
		t->wanted_cluster = -1;
		t->task = bL_switcher_thread_create(cpu, t);
	}

	bL_switcher_active = 1;
	cpu_hotplug_driver_unlock();

	pr_info("big.LITTLE switcher initialized\n");
	return 0;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
	unsigned int cpu, cluster;
	struct bL_thread *t;
	struct task_struct *task;

	cpu_hotplug_driver_lock();
	if (!bL_switcher_active) {
		cpu_hotplug_driver_unlock();
		return;
	}
	bL_switcher_active = 0;

	/*
	 * To deactivate the switcher, we must shut down the switcher
	 * threads to prevent any other requests from being accepted.
	 * Then, if the final cluster for given logical CPU is not the
	 * same as the original one, we'll recreate a switcher thread
	 * just for the purpose of switching the CPU back without any
	 * possibility for interference from external requests.
	 */
	for_each_online_cpu(cpu) {
		t = &bL_threads[cpu];
		task = t->task;
		t->task = NULL;
		if (!task || IS_ERR(task))
			continue;
		kthread_stop(task);
		/* no more switch may happen on this CPU at this point */
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
		if (cluster == bL_switcher_cpu_original_cluster[cpu])
			continue;
		init_completion(&t->started);
		t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
		task = bL_switcher_thread_create(cpu, t);
		if (!IS_ERR(task)) {
			wait_for_completion(&t->started);
			kthread_stop(task);
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
			if (cluster == bL_switcher_cpu_original_cluster[cpu])
				continue;
		}
		/* If execution gets here, we're in trouble. */
		pr_crit("%s: unable to restore original cluster for CPU %d\n",
			__func__, cpu);
		pr_crit("%s: CPU %d can't be restored\n",
			__func__, bL_switcher_cpu_pairing[cpu]);
		cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
				  &bL_switcher_removed_logical_cpus);
	}

	bL_switcher_restore_cpus();
	cpu_hotplug_driver_unlock();
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret;

	switch (buf[0]) {
	case '0':
		bL_switcher_disable();
		ret = 0;
		break;
	case '1':
		ret = bL_switcher_enable();
		break;
	default:
		ret = -EINVAL;
	}

	return (ret >= 0) ? count : ret;
}

static struct kobj_attribute bL_switcher_active_attr =
	__ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct attribute *bL_switcher_attrs[] = {
	&bL_switcher_active_attr.attr,
	NULL,
};

static struct attribute_group bL_switcher_attr_group = {
	.attrs = bL_switcher_attrs,
};

static struct kobject *bL_switcher_kobj;

static int __init bL_switcher_sysfs_init(void)
{
	int ret;

	bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
	if (!bL_switcher_kobj)
		return -ENOMEM;
	ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
	if (ret)
		kobject_put(bL_switcher_kobj);
	return ret;
}

#endif /* CONFIG_SYSFS */
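
/*
 * With the sysfs group above in place, the switcher can be toggled from
 * user space, e.g.:
 *
 *	echo 0 > /sys/kernel/bL_switcher/active		(disable, restore CPUs)
 *	echo 1 > /sys/kernel/bL_switcher/active		(re-enable the switcher)
 *
 * (path assuming the usual kernel_kobj location under /sys/kernel).
 */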

static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
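
/*
 * Being a core_param, this can be set on the kernel command line (e.g.
 * "no_bL_switcher=1") to keep the switcher from being enabled at boot,
 * while still allowing a later manual enable through the sysfs knob above.
 */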

static int __init bL_switcher_init(void)
{
	int ret;

	if (MAX_NR_CLUSTERS != 2) {
		pr_err("%s: only dual cluster systems are supported\n", __func__);
		return -EINVAL;
	}

	if (!no_bL_switcher) {
		ret = bL_switcher_enable();
		if (ret)
			return ret;
	}

#ifdef CONFIG_SYSFS
	ret = bL_switcher_sysfs_init();
	if (ret)
		pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

	return 0;
}

late_initcall(bL_switcher_init);