/*
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @cb_state:	The state for a single callback (install/uninstall)
 * @cb:		Single callback function (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	enum cpuhp_state	cb_state;
	int			(*cb)(unsigned int cpu);
	int			result;
	struct completion	done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char	*name;
	int		(*startup)(unsigned int cpu);
	int		(*teardown)(unsigned int cpu);
	bool		skip_onerr;
	bool		cant_stop;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @step:	The step in the state machine
 * @cb:		The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
				 int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret = 0;

	if (cb) {
		trace_cpuhp_enter(cpu, st->target, step, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, step, ret);
	}
	return ret;
}
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);
/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
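
/*
 * Illustrative usage sketch, not part of this file: a reader that needs a
 * stable cpu_online_mask brackets its walk with the refcounting APIs above.
 * The function and its printout are hypothetical.
 */
#if 0	/* example only */
static void example_walk_online_cpus(void)
{
	unsigned int cpu;

	get_online_cpus();		/* blocks hotplug writers */
	for_each_online_cpu(cpu)
		pr_info("cpu %u is online\n", cpu);
	put_online_cpus();		/* lets hotplug proceed again */
}
#endif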
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}
void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}
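
/*
 * Illustrative sketch, not part of this file: a minimal hotplug notifier as
 * a client of register_cpu_notifier() would write one. The callback and the
 * notifier_block names are hypothetical.
 */
#if 0	/* example only */
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("example: cpu %u came online\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("example: cpu %u is gone\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};
/* Registered via register_cpu_notifier(&example_cpu_notifier); */
#endif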
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}
/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}
static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static int notify_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
	return 0;
}
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	BUG_ON(!cpu_online(cpu));
	return 0;
}
/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
			  struct cpuhp_step *steps)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = steps + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				struct cpuhp_step *steps, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = steps + st->state;

		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st, steps);
			break;
		}
	}
	return ret;
}
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
			struct cpuhp_step *steps)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = steps + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
}
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      struct cpuhp_step *steps, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = steps + st->state;
		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st, steps);
			break;
		}
	}
	return ret;
}
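
/*
 * A minimal, self-contained sketch of the traversal pattern above,
 * illustrative only and free of kernel dependencies: bringup walks the step
 * array upwards invoking ->startup, and on failure rolls back downwards with
 * ->teardown, mirroring cpuhp_up_callbacks() and undo_cpu_up(). All names
 * here are hypothetical.
 */
#if 0	/* example only */
struct mini_step {
	int  (*startup)(void);
	void (*teardown)(void);
};

static int mini_up(struct mini_step *steps, int *state, int target)
{
	while (*state < target) {
		(*state)++;
		if (steps[*state].startup()) {
			/* Roll back the steps that already succeeded */
			for ((*state)--; *state > 0; (*state)--)
				steps[*state].teardown();
			return -1;
		}
	}
	return 0;
}
#endif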
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done);
}
static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}
/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_ONLINE);

	return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target);
}
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	int ret = 0;

	/*
	 * Paired with the mb() in cpuhp_kick_ap_work and
	 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
	 */
	smp_mb();
	if (!st->should_run)
		return;

	st->should_run = false;

	/* Single callback invocation for [un]install ? */
	if (st->cb) {
		if (st->cb_state < CPUHP_AP_ONLINE) {
			local_irq_disable();
			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
			local_irq_enable();
		} else {
			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
		}
	} else {
		/* Regular hotplug work */
		if (st->state < st->target)
			ret = cpuhp_ap_online(cpu, st);
		else if (st->state > st->target)
			ret = cpuhp_ap_offline(cpu, st);
	}
	st->result = ret;
	complete(&st->done);
}
/* Invoke a single callback on a remote cpu */
static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
				    int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	if (!cpu_online(cpu))
		return 0;

	st->cb_state = state;
	st->cb = cb;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_completion(&st->done);
	return st->result;
}
/* Regular hotplug invocation of the AP hotplug thread */
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state state = st->state;

	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
	st->cb = NULL;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_completion(&st->done);
	trace_cpuhp_exit(cpu, st->state, state, st->result);
	return st->result;
}
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}
static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}
static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	}
	return err;
}
static int notify_dying(unsigned int cpu)
{
	cpu_notify(CPU_DYING, cpu);
	return 0;
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = cpuhp_ap_states + st->state;

		cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	int err;

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * rcu boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		irq_unlock_sparse();
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}
static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}

#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#define notify_dying		NULL
#endif
#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);

	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;

	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_ap_states + st->state;
		cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}
/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread.
 */
static int cpuhp_set_cpu_active(unsigned int cpu)
{
	/* The cpu is marked online, set it active now */
	set_cpu_active(cpu, true);
	/* Unpark the stopper thread */
	stop_machine_unpark(cpu);
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpu_hotplug_begin();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
out:
	cpu_hotplug_done();
	return ret;
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
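
/*
 * Illustrative sketch, not part of this file: cycling a CPU through offline
 * and back online from kernel code via the two exported entry points above.
 * The wrapper function is hypothetical.
 */
#if 0	/* example only */
static int example_cycle_cpu(unsigned int cpu)
{
	int ret;

	ret = cpu_down(cpu);	/* walk the state machine to CPUHP_OFFLINE */
	if (ret)
		return ret;
	return cpu_up(cpu);	/* and back up to CPUHP_ONLINE */
}
#endif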
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup		= NULL,
		.teardown		= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:create",
		.startup		= smpboot_create_threads,
		.teardown		= NULL,
		.cant_stop		= true,
	},
	/* Preparatory and dead notifiers */
	[CPUHP_NOTIFY_PREPARE] = {
		.name			= "notify:prepare",
		.startup		= notify_prepare,
		.teardown		= notify_dead,
		.skip_onerr		= true,
		.cant_stop		= true,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup		= bringup_cpu,
		.teardown		= NULL,
		.cant_stop		= true,
	},
	/* Handled on the control CPU until the plugged CPU can do it itself */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup		= NULL,
		.teardown		= takedown_cpu,
		.cant_stop		= true,
	},
	[CPUHP_CPU_SET_ACTIVE] = {
		.name			= "cpu:active",
		.startup		= cpuhp_set_cpu_active,
		.teardown		= NULL,
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_SMPBOOT_THREADS] = {
		.name			= "smpboot:threads",
		.startup		= smpboot_unpark_threads,
		.teardown		= smpboot_park_threads,
	},
	/* Online/down_prepare notifiers */
	[CPUHP_NOTIFY_ONLINE] = {
		.name			= "notify:online",
		.startup		= notify_online,
		.teardown		= notify_down_prepare,
	},
#endif
	/* The dynamically registered state space is here */

	/* CPU is fully up and running */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup		= NULL,
		.teardown		= NULL,
	},
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
	[CPUHP_AP_NOTIFY_STARTING] = {
		.name			= "notify:starting",
		.startup		= notify_starting,
		.teardown		= notify_dying,
		.skip_onerr		= true,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	return (state >= CPUHP_AP_OFFLINE && state <= CPUHP_AP_ONLINE);
}
static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}
static void cpuhp_store_callbacks(enum cpuhp_state state,
				  const char *name,
				  int (*startup)(unsigned int cpu),
				  int (*teardown)(unsigned int cpu))
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(state);
	sp->startup = startup;
	sp->teardown = teardown;
	sp->name = name;
	mutex_unlock(&cpuhp_state_mutex);
}
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown;
}
/* Helper function to run callback on the target cpu */
static void cpuhp_on_cpu_cb(void *__cb)
{
	int (*cb)(unsigned int cpu) = __cb;

	BUG_ON(cb(smp_processor_id()));
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
			    int (*cb)(unsigned int), bool bringup)
{
	int ret;

	if (!cb)
		return 0;

	/*
	 * This invokes the callback directly for now. In a later step we
	 * convert that to use cpuhp_invoke_callback().
	 */
	if (cpuhp_is_ap_state(state)) {
		/*
		 * Note, that a function called on the AP is not
		 * allowed to fail.
		 */
		if (cpu_online(cpu))
			smp_call_function_single(cpu, cpuhp_on_cpu_cb, cb, 1);
		return 0;
	}

	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
	ret = cb(cpu);
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   int (*teardown)(unsigned int cpu))
{
	int cpu;

	if (!teardown)
		return;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
}
/*
 * Returns a free slot for dynamic state assignment in the Online state
 * space. The states are protected by the cpuhp_state_mutex and an empty
 * slot is identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_ONLINE_DYN; i <= CPUHP_ONLINE_DYN_END; i++) {
		if (cpuhp_bp_states[i].name)
			continue;

		cpuhp_bp_states[i].name = "Reserved";
		mutex_unlock(&cpuhp_state_mutex);
		return i;
	}
	mutex_unlock(&cpuhp_state_mutex);
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
/**
 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 * @state:	The state to setup
 * @name:	Name of the state
 * @invoke:	If true, the startup function is invoked for cpus where
 *		cpu state >= @state
 * @startup:	startup callback function
 * @teardown:	teardown callback function
 *
 * Returns 0 if successful, otherwise a proper error code
 */
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu))
{
	int cpu, ret = 0;
	int dyn_state = 0;

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	get_online_cpus();

	/* currently assignments for the ONLINE state are possible */
	if (state == CPUHP_ONLINE_DYN) {
		dyn_state = 1;
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			goto out;
		state = ret;
	}

	cpuhp_store_callbacks(state, name, startup, teardown);

	if (!invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, startup, true);
		if (ret) {
			cpuhp_rollback_install(cpu, state, teardown);
			cpuhp_store_callbacks(state, NULL, NULL, NULL);
			goto out;
		}
	}
out:
	put_online_cpus();
	if (!ret && dyn_state)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
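
/*
 * Illustrative sketch, not part of this file: installing a dynamic online
 * state through the cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>,
 * which resolves CPUHP_ONLINE_DYN to a reserved slot. The callbacks and the
 * state name are hypothetical.
 */
#if 0	/* example only */
static int example_online(unsigned int cpu)
{
	pr_info("example: online callback on cpu %u\n", cpu);
	return 0;
}

static int example_offline(unsigned int cpu)
{
	pr_info("example: offline callback on cpu %u\n", cpu);
	return 0;
}

static int __init example_init(void)
{
	int state;

	state = cpuhp_setup_state(CPUHP_ONLINE_DYN, "example:online",
				  example_online, example_offline);
	/* For CPUHP_ONLINE_DYN the reserved state number is returned */
	return state < 0 ? state : 0;
}
#endif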
/**
 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	get_online_cpus();

	if (!invoke || !teardown)
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL);
	put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
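
/*
 * Illustrative user space view of the two attributes above, assuming the
 * attribute group below is registered as "hotplug":
 *
 *	cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target	# offline cpu1
 *
 * Without CONFIG_CPU_HOTPLUG_STATE_CONTROL only the CPUHP_OFFLINE and
 * CPUHP_ONLINE values are accepted as a target.
 */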
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};
static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};
static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
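
/*
 * Worked example (illustrative): with BITS_PER_LONG == 64, get_cpu_mask(67)
 * behind cpumask_of() picks row [1 + 67 % 64] == [4], whose word 0 holds
 * 1UL << 3, and backs the row pointer up by 67 / 64 == 1 long. The words
 * read before that row's word 0 then come from the tail of the previous
 * row, which is all zeroes, so the resulting mask has exactly bit 67 set.
 * The empty row [0] is what makes this backing up safe for the first rows.
 */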
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}
/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}