/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>

/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
        unsigned int id;
        asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
        return id & MPIDR_HWID_BITMASK;
}
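
/*
 * For reference: in an MPIDR value, affinity level 0 is the CPU number
 * within its cluster and affinity level 1 is the cluster number, e.g.
 * an MPIDR of 0x103 denotes CPU 3 of cluster 1.
 */
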
/*
 * Get a global nanosecond time stamp for tracing.
 */
static s64 get_ns(void)
{
        struct timespec ts;
        getnstimeofday(&ts);
        return timespec_to_ns(&ts);
}

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_arg)
{
        unsigned ib_mpidr, ib_cpu, ib_cluster;
        long volatile handshake, **handshake_ptr = _arg;

        pr_debug("%s\n", __func__);

        ib_mpidr = cpu_logical_map(smp_processor_id());
        ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
        ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

        /* Advertise our handshake location */
        if (handshake_ptr) {
                handshake = 0;
                *handshake_ptr = &handshake;
        } else
                handshake = -1;

        /*
         * Our state has been saved at this point.  Let's release our
         * inbound CPU.
         */
        mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
        sev();

        /*
         * From this point, we must assume that our counterpart CPU might
         * have taken over in its parallel world already, as if execution
         * just returned from cpu_suspend().  It is therefore important to
         * be very careful not to make any change the other guy is not
         * expecting.  This is why we need stack isolation.
         *
         * Fancy under cover tasks could be performed here.  For now
         * we have none.
         */

        /*
         * Let's wait until our inbound is alive.
         */
        while (!handshake) {
                wfe();
                smp_mb();
        }

        /* Let's put ourself down. */
        mcpm_cpu_power_down();

        /* should never get here */
        BUG();
}

/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
        unsigned int mpidr = read_mpidr();
        unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        void *stack = current_thread_info() + 1;
        stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
        stack += clusterid * STACK_SIZE + STACK_SIZE;
        call_with_stack(bL_do_switch, (void *)_arg, stack);
        BUG();
}
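
/*
 * Roughly, the resulting layout at the bottom of the thread's stack is:
 *
 *   [ thread_info ][ pad to L1_CACHE_BYTES ][ cluster 0: 512B ][ cluster 1: 512B ]
 *
 * The extra STACK_SIZE added above points 'stack' at the *end* of the
 * selected area, since ARM stacks are full-descending.
 */
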
/*
 * Generic switcher interface
 */

static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];
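
/*
 * bL_gic_id[][] caches the GIC CPU interface ID for each (cpu, cluster)
 * position.  bL_switcher_cpu_pairing[] maps each paired logical CPU to
 * its counterpart on the other cluster; -1 means the CPU has no pairing
 * and takes no part in switching.  Both are filled by
 * bL_switcher_halve_cpus() below.
 */
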
/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
        unsigned int mpidr, this_cpu, that_cpu;
        unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
        struct completion inbound_alive;
        struct tick_device *tdev;
        enum clock_event_mode tdev_mode;
        long volatile *handshake_ptr;
        int ipi_nr, ret;

        this_cpu = smp_processor_id();
        ob_mpidr = read_mpidr();
        ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
        ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
        BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

        if (new_cluster_id == ob_cluster)
                return 0;

        that_cpu = bL_switcher_cpu_pairing[this_cpu];
        ib_mpidr = cpu_logical_map(that_cpu);
        ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
        ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

        pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
                 this_cpu, ob_mpidr, ib_mpidr);

        this_cpu = smp_processor_id();

        /* Close the gate for our entry vectors */
        mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
        mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

        /* Install our "inbound alive" notifier. */
        init_completion(&inbound_alive);
        ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
        ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
        mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);

        /*
         * Let's wake up the inbound CPU now in case it requires some delay
         * to come online, but leave it gated in our entry vector code.
         */
        ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
        if (ret) {
                pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
                return ret;
        }

        /*
         * Raise a SGI on the inbound CPU to make sure it doesn't stall
         * in a possible WFI, such as in bL_power_down().
         */
        gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);

        /*
         * Wait for the inbound to come up.  This allows for other
         * tasks to be scheduled in the mean time.
         */
        wait_for_completion(&inbound_alive);
        mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);

        /*
         * From this point we are entering the switch critical zone
         * and can't take any interrupts anymore.
         */
        local_irq_disable();
        local_fiq_disable();
        trace_cpu_migrate_begin(get_ns(), ob_mpidr);

        /* redirect GIC's SGIs to our counterpart */
        gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

        tdev = tick_get_device(this_cpu);
        if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
                tdev = NULL;
        if (tdev) {
                tdev_mode = tdev->evtdev->mode;
                clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
        }

        ret = cpu_pm_enter();

        /* we can not tolerate errors at this point */
        if (ret)
                panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

        /* Swap the physical CPUs in the logical map for this logical CPU. */
        cpu_logical_map(this_cpu) = ib_mpidr;
        cpu_logical_map(that_cpu) = ob_mpidr;

        /* Let's do the actual CPU switch. */
        ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
        if (ret > 0)
                panic("%s: cpu_suspend() returned %d\n", __func__, ret);

        /* We are executing on the inbound CPU at this point */
        mpidr = read_mpidr();
        pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
        BUG_ON(mpidr != ib_mpidr);

        mcpm_cpu_powered_up();

        ret = cpu_pm_exit();

        if (tdev) {
                clockevents_set_mode(tdev->evtdev, tdev_mode);
                clockevents_program_event(tdev->evtdev,
                                          tdev->evtdev->next_event, 1);
        }

        trace_cpu_migrate_finish(get_ns(), ib_mpidr);
        local_fiq_enable();
        local_irq_enable();

        *handshake_ptr = 1;
        dsb_sev();

        if (ret)
                pr_err("%s exiting with error %d\n", __func__, ret);
        return ret;
}
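
/*
 * To summarize the sequence above: the gated inbound CPU is powered up,
 * interrupts and the local tick device are quiesced, SGIs are redirected
 * with gic_migrate_target(), the two physical CPUs are swapped in the
 * logical map, and cpu_suspend() transfers control through
 * bL_switchpoint() so the inbound CPU resumes the saved context while
 * the outbound CPU powers itself down.
 */
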
struct bL_thread {
        spinlock_t lock;
        struct task_struct *task;
        wait_queue_head_t wq;
        int wanted_cluster;
        struct completion started;
        bL_switch_completion_handler completer;
        void *completer_cookie;
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
        struct bL_thread *t = arg;
        struct sched_param param = { .sched_priority = 1 };
        int cluster;
        bL_switch_completion_handler completer;
        void *completer_cookie;

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
        complete(&t->started);

        do {
                if (signal_pending(current))
                        flush_signals(current);
                wait_event_interruptible(t->wq,
                                t->wanted_cluster != -1 ||
                                kthread_should_stop());

                spin_lock(&t->lock);
                cluster = t->wanted_cluster;
                completer = t->completer;
                completer_cookie = t->completer_cookie;
                t->wanted_cluster = -1;
                t->completer = NULL;
                spin_unlock(&t->lock);

                if (cluster != -1) {
                        bL_switch_to(cluster);

                        if (completer)
                                completer(completer_cookie);
                }
        } while (!kthread_should_stop());

        return 0;
}
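
/*
 * Note: the switcher thread runs SCHED_FIFO at the lowest RT priority so
 * that a pending switch request takes precedence over ordinary tasks on
 * that CPU.  Requests are handed over under t->lock through
 * t->wanted_cluster, where -1 means "no request pending".
 */
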
static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
        struct task_struct *task;

        task = kthread_create_on_node(bL_switcher_thread, arg,
                                      cpu_to_node(cpu), "kswitcher_%d", cpu);
        if (!IS_ERR(task)) {
                kthread_bind(task, cpu);
                wake_up_process(task);
        } else
                pr_err("%s failed for CPU %d\n", __func__, cpu);
        return task;
}

/*
 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
 *      with completion notification via a callback
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 * @completer: switch completion callback.  if non-NULL,
 *      @completer(@completer_cookie) will be called on completion of
 *      the switch, in non-atomic context.
 * @completer_cookie: opaque context argument for @completer.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread.  This function may or may not return
 * before the switch has occurred.
 *
 * If a @completer callback function is supplied, it will be called when
 * the switch is complete.  This can be used to determine asynchronously
 * when the switch is complete, regardless of when bL_switch_request()
 * returns.  When @completer is supplied, no new switch request is permitted
 * for the affected CPU until after the switch is complete, and @completer
 * has returned.
 */
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
                         bL_switch_completion_handler completer,
                         void *completer_cookie)
{
        struct bL_thread *t;

        if (cpu >= ARRAY_SIZE(bL_threads)) {
                pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
                return -EINVAL;
        }

        t = &bL_threads[cpu];

        if (IS_ERR(t->task))
                return PTR_ERR(t->task);
        if (!t->task)
                return -ESRCH;

        spin_lock(&t->lock);
        if (t->completer) {
                spin_unlock(&t->lock);
                return -EBUSY;
        }
        t->completer = completer;
        t->completer_cookie = completer_cookie;
        t->wanted_cluster = new_cluster_id;
        spin_unlock(&t->lock);
        wake_up(&t->wq);
        return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request_cb);
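
/*
 * Example: a caller wanting a synchronous switch could pair this with a
 * completion (an illustrative sketch only; my_switch_done and
 * switch_and_wait are hypothetical names, not part of this API):
 *
 *      static void my_switch_done(void *cookie)
 *      {
 *              complete(cookie);
 *      }
 *
 *      static int switch_and_wait(unsigned int cpu, unsigned int cluster)
 *      {
 *              struct completion done;
 *              int ret;
 *
 *              init_completion(&done);
 *              ret = bL_switch_request_cb(cpu, cluster, my_switch_done, &done);
 *              if (ret)
 *                      return ret;
 *              wait_for_completion(&done);
 *              return 0;
 *      }
 */
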
/*
 * Activation and configuration code.
 */

static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);

int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
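
/*
 * Example notifier usage (an illustrative sketch; my_bL_notify and
 * my_bL_nb are hypothetical).  Clients see BL_NOTIFY_PRE/POST_ENABLE and
 * BL_NOTIFY_PRE/POST_DISABLE events, and returning NOTIFY_BAD from a
 * PRE_* event vetoes the transition:
 *
 *      static int my_bL_notify(struct notifier_block *nb,
 *                              unsigned long event, void *unused)
 *      {
 *              return (event == BL_NOTIFY_PRE_DISABLE) ? NOTIFY_BAD : NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_bL_nb = {
 *              .notifier_call = my_bL_notify,
 *      };
 *
 *      bL_switcher_register_notifier(&my_bL_nb);
 */
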
static int bL_activation_notify(unsigned long val)
{
        int ret;

        ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
        if (ret & NOTIFY_STOP_MASK)
                pr_err("%s: notifier chain failed with status 0x%x\n",
                        __func__, ret);
        return notifier_to_errno(ret);
}

static void bL_switcher_restore_cpus(void)
{
        int i;

        for_each_cpu(i, &bL_switcher_removed_logical_cpus)
                cpu_up(i);
}

static int bL_switcher_halve_cpus(void)
{
        int i, j, cluster_0, gic_id, ret;
        unsigned int cpu, cluster, mask;
        cpumask_t available_cpus;

        /* First pass to validate what we have */
        mask = 0;
        for_each_online_cpu(i) {
                cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
                if (cluster >= 2) {
                        pr_err("%s: only dual cluster systems are supported\n", __func__);
                        return -EINVAL;
                }
                if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
                        return -EINVAL;
                mask |= (1 << cluster);
        }

        if (mask != 3) {
                pr_err("%s: no CPU pairing possible\n", __func__);
                return -EINVAL;
        }

        /*
         * Now let's do the pairing.  We match each CPU with another CPU
         * from a different cluster.  To get a uniform scheduling behavior
         * without fiddling with CPU topology and compute capacity data,
         * we'll use logical CPUs initially belonging to the same cluster.
         */
        memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
        cpumask_copy(&available_cpus, cpu_online_mask);
        cluster_0 = -1;

        for_each_cpu(i, &available_cpus) {
                int match = -1;
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
                if (cluster_0 == -1)
                        cluster_0 = cluster;
                if (cluster != cluster_0)
                        continue;
                cpumask_clear_cpu(i, &available_cpus);
                for_each_cpu(j, &available_cpus) {
                        cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
                        /*
                         * Let's remember the last match to create "odd"
                         * pairings on purpose in order for other code not
                         * to assume any relation between physical and
                         * logical CPU numbers.
                         */
                        if (cluster != cluster_0)
                                match = j;
                }
                if (match != -1) {
                        bL_switcher_cpu_pairing[i] = match;
                        cpumask_clear_cpu(match, &available_cpus);
                        pr_info("CPU%d paired with CPU%d\n", i, match);
                }
        }

        /*
         * Now we disable the unwanted CPUs i.e. everything that has no
         * pairing information (that includes the pairing counterparts).
         */
        cpumask_clear(&bL_switcher_removed_logical_cpus);
        for_each_online_cpu(i) {
                cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

                /* Let's take note of the GIC ID for this CPU */
                gic_id = gic_get_cpu_id(i);
                if (gic_id < 0) {
                        pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
                        bL_switcher_restore_cpus();
                        return -EINVAL;
                }
                bL_gic_id[cpu][cluster] = gic_id;
                pr_info("GIC ID for CPU %u cluster %u is %u\n",
                        cpu, cluster, gic_id);

                if (bL_switcher_cpu_pairing[i] != -1) {
                        bL_switcher_cpu_original_cluster[i] = cluster;
                        continue;
                }

                ret = cpu_down(i);
                if (ret) {
                        bL_switcher_restore_cpus();
                        return ret;
                }
                cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
        }

        return 0;
}

static int bL_switcher_enable(void)
{
        int cpu, ret;

        mutex_lock(&bL_switcher_activation_lock);
        cpu_hotplug_driver_lock();
        if (bL_switcher_active) {
                cpu_hotplug_driver_unlock();
                mutex_unlock(&bL_switcher_activation_lock);
                return 0;
        }

        pr_info("big.LITTLE switcher initializing\n");

        ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
        if (ret)
                goto error;

        ret = bL_switcher_halve_cpus();
        if (ret)
                goto error;

        for_each_online_cpu(cpu) {
                struct bL_thread *t = &bL_threads[cpu];
                spin_lock_init(&t->lock);
                init_waitqueue_head(&t->wq);
                init_completion(&t->started);
                t->wanted_cluster = -1;
                t->task = bL_switcher_thread_create(cpu, t);
        }

        bL_switcher_active = 1;
        bL_activation_notify(BL_NOTIFY_POST_ENABLE);
        pr_info("big.LITTLE switcher initialized\n");
        goto out;

error:
        pr_warn("big.LITTLE switcher initialization failed\n");
        bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
        cpu_hotplug_driver_unlock();
        mutex_unlock(&bL_switcher_activation_lock);
        return ret;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
        unsigned int cpu, cluster;
        struct bL_thread *t;
        struct task_struct *task;

        mutex_lock(&bL_switcher_activation_lock);
        cpu_hotplug_driver_lock();

        if (!bL_switcher_active)
                goto out;

        if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
                bL_activation_notify(BL_NOTIFY_POST_ENABLE);
                goto out;
        }

        bL_switcher_active = 0;

        /*
         * To deactivate the switcher, we must shut down the switcher
         * threads to prevent any other requests from being accepted.
         * Then, if the final cluster for given logical CPU is not the
         * same as the original one, we'll recreate a switcher thread
         * just for the purpose of switching the CPU back without any
         * possibility for interference from external requests.
         */
        for_each_online_cpu(cpu) {
                t = &bL_threads[cpu];
                task = t->task;
                t->task = NULL;
                if (!task || IS_ERR(task))
                        continue;
                kthread_stop(task);
                /* no more switch may happen on this CPU at this point */
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
                if (cluster == bL_switcher_cpu_original_cluster[cpu])
                        continue;
                init_completion(&t->started);
                t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
                task = bL_switcher_thread_create(cpu, t);
                if (!IS_ERR(task)) {
                        wait_for_completion(&t->started);
                        kthread_stop(task);
                        cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
                        if (cluster == bL_switcher_cpu_original_cluster[cpu])
                                continue;
                }

                /* If execution gets here, we're in trouble. */
                pr_crit("%s: unable to restore original cluster for CPU %d\n",
                        __func__, cpu);
                pr_crit("%s: CPU %d can't be restored\n",
                        __func__, bL_switcher_cpu_pairing[cpu]);
                cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
                                  &bL_switcher_removed_logical_cpus);
        }

        bL_switcher_restore_cpus();
        bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
        cpu_hotplug_driver_unlock();
        mutex_unlock(&bL_switcher_activation_lock);
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int ret;

        switch (buf[0]) {
        case '0':
                bL_switcher_disable();
                ret = 0;
                break;
        case '1':
                ret = bL_switcher_enable();
                break;
        default:
                ret = -EINVAL;
        }

        return (ret >= 0) ? count : ret;
}

static struct kobj_attribute bL_switcher_active_attr =
        __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct attribute *bL_switcher_attrs[] = {
        &bL_switcher_active_attr.attr,
        NULL,
};

static struct attribute_group bL_switcher_attr_group = {
        .attrs = bL_switcher_attrs,
};

static struct kobject *bL_switcher_kobj;

static int __init bL_switcher_sysfs_init(void)
{
        int ret;

        bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
        if (!bL_switcher_kobj)
                return -ENOMEM;
        ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
        if (ret)
                kobject_put(bL_switcher_kobj);
        return ret;
}

#endif /* CONFIG_SYSFS */
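
/*
 * With CONFIG_SYSFS enabled, the switcher can thus be toggled at run time:
 *
 *      echo 0 > /sys/kernel/bL_switcher/active
 *      echo 1 > /sys/kernel/bL_switcher/active
 */
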
bool bL_switcher_get_enabled(void)
{
        mutex_lock(&bL_switcher_activation_lock);

        return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);

void bL_switcher_put_enabled(void)
{
        mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);

/*
 * Veto any CPU hotplug operation on those CPUs we've removed
 * while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */
static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
{
        if (bL_switcher_active) {
                int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
                switch (action & 0xf) {
                case CPU_UP_PREPARE:
                case CPU_DOWN_PREPARE:
                        if (pairing == -1)
                                return NOTIFY_BAD;
                }
        }
        return NOTIFY_DONE;
}

static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
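
/*
 * Being a core_param, this can be set with "no_bL_switcher=1" on the
 * kernel command line to keep the switcher disabled at boot; it may
 * still be enabled later through the sysfs interface above.  Given the
 * 0644 mode, it should also appear under /sys/module/kernel/parameters/.
 */
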
static int __init bL_switcher_init(void)
{
        int ret;

        if (MAX_NR_CLUSTERS != 2) {
                pr_err("%s: only dual cluster systems are supported\n", __func__);
                return -EINVAL;
        }

        cpu_notifier(bL_switcher_hotplug_callback, 0);

        if (!no_bL_switcher) {
                ret = bL_switcher_enable();
                if (ret)
                        return ret;
        }

#ifdef CONFIG_SYSFS
        ret = bL_switcher_sysfs_init();
        if (ret)
                pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

        return 0;
}

late_initcall(bL_switcher_init);