/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
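
/*
 * Usage sketch for the registration protocol described above
 * (illustrative only; "my_nb" is a hypothetical notifier_block).
 * Registration is done under the begin/done pair so it cannot race
 * with a concurrent hotplug operation:
 *
 *      cpu_notifier_register_begin();
 *      __register_cpu_notifier(&my_nb);
 *      cpu_notifier_register_done();
 */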

static RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read()    lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()         lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()         lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
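
/*
 * Minimal usage sketch for the read side (illustrative only;
 * "do_work_on" is a hypothetical helper). Holding the refcount keeps
 * cpu_online_mask stable for the duration of the iteration:
 *
 *      get_online_cpus();
 *      for_each_online_cpu(cpu)
 *              do_work_on(cpu);
 *      put_online_cpus();
 */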

void put_online_cpus(void)
{
        int refcount;

        if (cpu_hotplug.active_writer == current)
                return;

        refcount = atomic_dec_return(&cpu_hotplug.refcount);
        if (WARN_ON(refcount < 0)) /* try to fix things up */
                atomic_inc(&cpu_hotplug.refcount);

        if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
                wake_up(&cpu_hotplug.wq);

        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
        DEFINE_WAIT(wait);

        cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                        break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}
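
/*
 * Writer-side pairing, as used by _cpu_down()/_cpu_up() below:
 * cpu_hotplug_begin() waits for the refcount to drain and returns with
 * cpu_hotplug.lock held, so new readers block until cpu_hotplug_done()
 * releases the lock again.
 */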

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled++;
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
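
/*
 * Note: cpu_hotplug_pm_callback() below (under CONFIG_PM_SLEEP_SMP) uses
 * this disable/enable pair to fence hotplug off for the whole
 * suspend/hibernate sequence.
 */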

#endif  /* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}
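
/*
 * Sketch of a typical notifier callback (illustrative only;
 * smpboot_thread_call() further down is a real in-file instance of the
 * same pattern; "prepare_for"/"clean_up_after" are hypothetical helpers):
 *
 *      static int my_cpu_callback(struct notifier_block *nfb,
 *                                 unsigned long action, void *hcpu)
 *      {
 *              unsigned int cpu = (long)hcpu;
 *
 *              switch (action & ~CPU_TASKS_FROZEN) {
 *              case CPU_ONLINE:
 *                      prepare_for(cpu);
 *                      break;
 *              case CPU_DEAD:
 *                      clean_up_after(cpu);
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 */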

static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
                        int *nr_calls)
{
        unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
        void *hcpu = (void *)(long)cpu;
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
        return __cpu_notify(val, cpu, -1, NULL);
}
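
/*
 * The mod folds the freezer state into the event, e.g.
 * CPU_ONLINE | CPU_TASKS_FROZEN is CPU_ONLINE_FROZEN. Callbacks that do
 * not care about the distinction mask the bit off, as
 * smpboot_thread_call() below does with "action & ~CPU_TASKS_FROZEN".
 */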

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
        BUG_ON(cpu_notify(val, cpu));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reads so that we do not warn about a task
                 * which was running on this cpu in the past but has
                 * just been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        }
        read_unlock(&tasklist_lock);
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
        int err, cpu = smp_processor_id();

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING, cpu);
        /* Give up timekeeping duties */
        tick_handover_do_timer();
        /* Park the stopper thread */
        stop_machine_park(cpu);
        return 0;
}
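
/*
 * Note: take_cpu_down() is invoked through stop_machine() from
 * _cpu_down() below, so it runs on the dying CPU itself with interrupts
 * disabled while every other CPU spins in its stopper thread.
 */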

/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        cpuhp_tasks_frozen = tasks_frozen;

        err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
                goto out_release;
        }

        /*
         * By now we've cleared cpu_active_mask, wait for all preempt-disabled
         * and RCU users of this state to go away such that all new such users
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
         * not imply sync_sched(), so wait for both.
         *
         * Do the sync before parking the smpboot threads to take care of the
         * RCU boost case.
         */
        if (IS_ENABLED(CONFIG_PREEMPT))
                synchronize_rcu_mult(call_rcu, call_rcu_sched);
        else
                synchronize_rcu();

        smpboot_park_threads(cpu);

        /*
         * Prevent irq alloc/free while the dying cpu reorganizes the
         * interrupt affinities.
         */
        irq_lock_sparse();

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */
        err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone. Can't complain. */
                cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
                irq_unlock_sparse();
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!per_cpu(cpu_dead_idle, cpu))
                cpu_relax();
        smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
        per_cpu(cpu_dead_idle, cpu) = false;

        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
        irq_unlock_sparse();

        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone. Too late to complain. */
        tick_cleanup_dead_cpu(cpu);
        cpu_notify_nofail(CPU_DEAD, cpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD, cpu);
        return err;
}

int cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
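
/*
 * cpu_down() also backs the sysfs interface; e.g.
 * "echo 0 > /sys/devices/system/cpu/cpu1/online" ends up here via the
 * cpu subsystem's online store callback.
 */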
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
                               unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {

        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                smpboot_unpark_threads(cpu);
                break;

        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
        .notifier_call = smpboot_thread_call,
        .priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
        register_cpu_notifier(&smpboot_thread_notifier);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
        struct task_struct *idle;
        int ret, nr_calls = 0;

        cpu_hotplug_begin();

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        cpuhp_tasks_frozen = tasks_frozen;

        ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                pr_warn("%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE, cpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
out:
        cpu_hotplug_done();

        return ret;
}

int cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
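
/*
 * Mirror of cpu_down(); "echo 1 > /sys/devices/system/cpu/cpu1/online"
 * lands here. Only CPUs in cpu_possible_mask can ever be brought up,
 * hence the cpu_possible() check above.
 */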

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error)
                BUG_ON(num_online_cpus() > 1);
        else
                pr_err("Non-boot CPUs are not disabled\n");

        /*
         * Make sure the CPUs won't be enabled by someone else. We need to do
         * this even in case of failure as all disable_nonboot_cpus() users are
         * supposed to do enable_nonboot_cpus() on the failure path.
         */
        cpu_hotplug_disabled++;

        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug and so avoid a hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        cpu_notify(CPU_STARTING, cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr, the NR_CPUS-bit value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
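
/*
 * How cpumask_of() uses this (a sketch of the logic; see get_cpu_mask()
 * in linux/cpumask.h for the authoritative version): row
 * 1 + cpu % BITS_PER_LONG holds the word (1UL << (cpu % BITS_PER_LONG))
 * in column 0, and stepping the row pointer back by
 * cpu / BITS_PER_LONG words borrows zero words from the rows above -
 * row 0 being all-zero is what makes that backing-up safe. E.g. on a
 * 64-bit box, cpumask_of(65) yields a bitmap whose word 1 is 1UL << 1,
 * i.e. only bit 65 set.
 */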

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
        = {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(&__cpu_online_mask, src);
}