/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char	*name;
	int		(*startup)(unsigned int cpu);
	int		(*teardown)(unsigned int cpu);
	bool		skip_onerr;
	bool		cant_stop;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @step:	The step in the state machine
 * @cb:		The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
				 int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret = 0;

	if (cb) {
		trace_cpuhp_enter(cpu, st->target, step, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, step, ret);
	}
	return ret;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

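/*
 * Usage sketch (illustrative only, not part of this file): code that needs a
 * stable cpu_online_mask brackets the region with the reader pair above. The
 * helper name my_count_online() and the message are made up for the example.
 *
 *	static void my_count_online(void)
 *	{
 *		unsigned int cpu, nr = 0;
 *
 *		get_online_cpus();		// block hotplug writers
 *		for_each_online_cpu(cpu)	// mask cannot change here
 *			nr++;
 *		put_online_cpus();		// allow hotplug again
 *		pr_info("saw %u online cpus\n", nr);
 *	}
 */
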
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an api which is called all that often.
 */

void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);

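/*
 * Usage sketch (illustrative, hypothetical caller): cpu_hotplug_disable() and
 * cpu_hotplug_enable() nest by counting, so every disable must be paired with
 * exactly one enable, including on error paths:
 *
 *	cpu_hotplug_disable();
 *	ret = do_something_that_must_not_race_with_hotplug();
 *	cpu_hotplug_enable();
 */
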
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

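/*
 * Registration sketch (illustrative; my_cpu_callback, my_cpu_notifier and the
 * my_*_for() helpers are hypothetical): a subsystem still on the legacy
 * notifier interface reacts to the CPU_* events delivered through cpu_chain:
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return my_alloc_for(cpu) ? NOTIFY_BAD : NOTIFY_OK;
 *		case CPU_DEAD:
 *			my_free_for(cpu);
 *			return NOTIFY_OK;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_notifier);
 */
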
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}

/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}

static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static int notify_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
	return 0;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	BUG_ON(!cpu_online(cpu));
	return 0;
}

/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
			  struct cpuhp_step *steps)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = steps + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				struct cpuhp_step *steps,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = steps + st->state;

		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st, steps);
			break;
		}
	}
	return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
			struct cpuhp_step *steps)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = steps + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      struct cpuhp_step *steps,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = steps + st->state;
		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st, steps);
			break;
		}
	}
	return ret;
}

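/*
 * Walk-through (informal): with st->state == CPUHP_OFFLINE and target ==
 * CPUHP_ONLINE, cpuhp_up_callbacks() advances one state at a time and invokes
 * each step's startup callback, e.g.
 *
 *	CPUHP_CREATE_THREADS -> CPUHP_NOTIFY_PREPARE -> CPUHP_BRINGUP_CPU -> ...
 *
 * If a startup callback fails at some state N, st->target is reset to the
 * previous state and undo_cpu_up() runs the teardown callbacks from N-1 back
 * down (skipping steps marked skip_onerr), so a failed bringup leaves the CPU
 * where it started. cpuhp_down_callbacks()/undo_cpu_down() mirror this for
 * the teardown direction.
 */
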
#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so its not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

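/*
 * Caller sketch (illustrative; the __cpu_disable() shown is a hypothetical
 * arch implementation, not a reference one): architectures typically invoke
 * this from their CPU teardown path once the CPU is marked offline, e.g.
 *
 *	int __cpu_disable(void)
 *	{
 *		...
 *		set_cpu_online(smp_processor_id(), false);
 *		...
 *		clear_tasks_mm_cpumask(smp_processor_id());
 *		return 0;
 *	}
 */
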
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading to do not warn about a task,
		 * which was running on this cpu in the past, and
		 * it's just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}

static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	}
	return err;
}

static int notify_dying(unsigned int cpu)
{
	cpu_notify(CPU_DYING, cpu);
	return 0;
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = cpuhp_ap_states + st->state;

		cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	int err;

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		irq_unlock_sparse();
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}

static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}

#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#define notify_dying		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);

	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;

	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_ap_states + st->state;
		cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}

/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread.
 */
static int cpuhp_set_cpu_active(unsigned int cpu)
{
	/* The cpu is marked online, set it active now */
	set_cpu_active(cpu, true);
	/* Unpark the stopper thread */
	stop_machine_unpark(cpu);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpu_hotplug_begin();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
out:
	cpu_hotplug_done();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);

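/*
 * Usage sketch (illustrative): cpu_up()/cpu_down() are the kernel-internal
 * entry points behind the sysfs "online" attribute. A hypothetical caller
 * cycling CPU 1 through the full state machine would simply do:
 *
 *	err = cpu_down(1);		// walk CPU 1 down to CPUHP_OFFLINE
 *	if (!err)
 *		err = cpu_up(1);	// and back up to CPUHP_ONLINE
 */
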
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

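/*
 * Call-path sketch (informal): the suspend core pairs these helpers around
 * entering the sleep state, roughly:
 *
 *	disable_nonboot_cpus();		// hot-unplug all but the boot CPU
 *	... enter the sleep state on the remaining CPU ...
 *	enable_nonboot_cpus();		// replug every CPU in frozen_cpus
 *
 * Note the tasks_frozen argument of 1 passed to _cpu_down()/_cpu_up() in the
 * loops above: during suspend/resume the notifiers see the CPU_TASKS_FROZEN
 * modifier or'ed into the event.
 */
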
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup		= NULL,
		.teardown		= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:create",
		.startup		= smpboot_create_threads,
		.teardown		= NULL,
		.cant_stop		= true,
	},
	[CPUHP_NOTIFY_PREPARE] = {
		.name			= "notify:prepare",
		.startup		= notify_prepare,
		.teardown		= notify_dead,
		.skip_onerr		= true,
		.cant_stop		= true,
	},
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup		= bringup_cpu,
		.teardown		= NULL,
		.cant_stop		= true,
	},
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup		= NULL,
		.teardown		= takedown_cpu,
		.cant_stop		= true,
	},
	[CPUHP_CPU_SET_ACTIVE] = {
		.name			= "cpu:active",
		.startup		= cpuhp_set_cpu_active,
		.teardown		= NULL,
	},
	[CPUHP_SMPBOOT_THREADS] = {
		.name			= "smpboot:threads",
		.startup		= smpboot_unpark_threads,
		.teardown		= smpboot_park_threads,
	},
	[CPUHP_NOTIFY_ONLINE] = {
		.name			= "notify:online",
		.startup		= notify_online,
		.teardown		= notify_down_prepare,
	},
#endif
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup		= NULL,
		.teardown		= NULL,
	},
};

/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	[CPUHP_AP_NOTIFY_STARTING] = {
		.name			= "notify:starting",
		.startup		= notify_starting,
		.teardown		= notify_dying,
		.skip_onerr		= true,
	},
#endif
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup		= NULL,
		.teardown		= NULL,
	},
};

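/*
 * Table sketch (illustrative; CPUHP_MY_STEP, my_prepare and my_dead are
 * hypothetical names): converting a legacy notifier to a state machine step
 * amounts to adding an entry at the right slot in one of the tables above:
 *
 *	[CPUHP_MY_STEP] = {
 *		.name		= "mydriver:prepare",
 *		.startup	= my_prepare,
 *		.teardown	= my_dead,
 *	},
 */
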
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	return (state > CPUHP_AP_OFFLINE && state < CPUHP_AP_ONLINE);
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}

static void cpuhp_store_callbacks(enum cpuhp_state state,
				  const char *name,
				  int (*startup)(unsigned int cpu),
				  int (*teardown)(unsigned int cpu))
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(state);
	sp->startup = startup;
	sp->teardown = teardown;
	sp->name = name;
	mutex_unlock(&cpuhp_state_mutex);
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown;
}

/* Helper function to run callback on the target cpu */
static void cpuhp_on_cpu_cb(void *__cb)
{
	int (*cb)(unsigned int cpu) = __cb;

	BUG_ON(cb(smp_processor_id()));
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
			    int (*cb)(unsigned int), bool bringup)
{
	int ret;

	if (!cb)
		return 0;

	/*
	 * This invokes the callback directly for now. In a later step we
	 * convert that to use cpuhp_invoke_callback().
	 */
	if (cpuhp_is_ap_state(state)) {
		/*
		 * Note, that a function called on the AP is not
		 * allowed to fail.
		 */
		if (cpu_online(cpu))
			smp_call_function_single(cpu, cpuhp_on_cpu_cb, cb, 1);
		return 0;
	}

	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
	ret = cb(cpu);
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   int (*teardown)(unsigned int cpu))
{
	int cpu;

	if (!teardown)
		return;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
}

/*
 * Returns a free slot for dynamic assignment in the Online state space. The
 * states are protected by the cpuhp_slot_states mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_ONLINE_DYN; i <= CPUHP_ONLINE_DYN_END; i++) {
		if (cpuhp_bp_states[i].name)
			continue;

		cpuhp_bp_states[i].name = "Reserved";
		mutex_unlock(&cpuhp_state_mutex);
		return i;
	}
	mutex_unlock(&cpuhp_state_mutex);
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

/**
 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 * @state:	The state to setup
 * @invoke:	If true, the startup function is invoked for cpus where
 *		cpu state >= @state
 * @startup:	startup callback function
 * @teardown:	teardown callback function
 *
 * Returns 0 if successful, otherwise a proper error code
 */
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu))
{
	int cpu, ret = 0;
	int dyn_state = 0;

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	get_online_cpus();

	/* currently assignments for the ONLINE state are possible */
	if (state == CPUHP_ONLINE_DYN) {
		dyn_state = 1;
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			goto out;
		state = ret;
	}

	cpuhp_store_callbacks(state, name, startup, teardown);

	if (!invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, startup, true);
		if (ret) {
			cpuhp_rollback_install(cpu, state, teardown);
			cpuhp_store_callbacks(state, NULL, NULL, NULL);
			goto out;
		}
	}
out:
	put_online_cpus();
	if (!ret && dyn_state)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);

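/*
 * Usage sketch (illustrative; my_online, my_offline and the my_percpu_*
 * helpers are hypothetical): a driver grabs a dynamic slot so its callbacks
 * run for every CPU that is, or later becomes, online:
 *
 *	static int my_online(unsigned int cpu)
 *	{
 *		return my_percpu_init(cpu);
 *	}
 *
 *	static int my_offline(unsigned int cpu)
 *	{
 *		my_percpu_exit(cpu);
 *		return 0;
 *	}
 *
 *	state = __cpuhp_setup_state(CPUHP_ONLINE_DYN, "mydriver:online",
 *				    true, my_online, my_offline);
 *	if (state < 0)
 *		return state;	// no free slot, or a startup call failed
 */
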
/**
 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	get_online_cpus();

	if (!invoke || !teardown)
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL);
	put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);

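/*
 * Counterpart sketch (illustrative, continuing the example above): on module
 * exit the dynamically reserved state must be torn down again, with invoke ==
 * true so my_offline() runs on every CPU that is still at or above it:
 *
 *	__cpuhp_remove_state(state, true);
 */
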
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)

static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

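/*
 * Interface sketch (informal, assuming the "hotplug" group names above): each
 * CPU then exposes its current and target state under
 * /sys/devices/system/cpu/cpuN/hotplug/ (files "state" and "target"), and the
 * names of all known states appear in /sys/devices/system/cpu/hotplug/states.
 * Writing a state number to "target" drives the CPU up or down through
 * do_cpu_up()/do_cpu_down().
 */
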
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

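/*
 * Worked example (informal): cpumask_of(cpu) takes the address of row
 * 1 + cpu % BITS_PER_LONG and steps back cpu / BITS_PER_LONG words, so the
 * word at offset cpu / BITS_PER_LONG of the resulting mask is that row's
 * word 0, i.e. 1UL << (cpu % BITS_PER_LONG). Every other word read this way
 * is 0, because each row only sets word 0 and row 0 (backed into by the
 * subtraction) is entirely empty. E.g. on a 64bit machine, cpumask_of(70)
 * yields word 0 == 0 and word 1 == 1UL << 6, which is exactly bit 70.
 */
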
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}