/*
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 */
struct cpuhp_step {
	const char	*name;
	int		(*startup)(unsigned int cpu);
	int		(*teardown)(unsigned int cpu);
	bool		skip_onerr;
};

static struct cpuhp_step cpuhp_bp_states[];
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @step:	The step in the state machine
 * @cb:		The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
				 int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret = 0;

	if (cb) {
		trace_cpuhp_enter(cpu, st->target, step, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, step, ret);
	}
	return ret;
}
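
/*
 * Illustrative sketch (simplified, not the literal code): bringing a CPU
 * from CPUHP_OFFLINE towards CPUHP_ONLINE is an ordered walk of the step
 * table, invoking each step's startup callback in turn:
 *
 *	for (st->state++; st->state <= target; st->state++)
 *		cpuhp_invoke_callback(cpu, st->state,
 *				      cpuhp_bp_states[st->state].startup);
 *
 * The real walks, including rollback on failure, live in _cpu_up() and
 * _cpu_down() below.
 */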
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
void get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
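
/*
 * Usage sketch: code that must keep the set of online CPUs stable while it
 * iterates brackets the walk with the reader-side lock. The helper name
 * below is hypothetical:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		setup_per_cpu_data(cpu);
 *	put_online_cpus();
 *
 * No CPU can come or go between the two calls.
 */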
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an api which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}
void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */
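
/*
 * Usage sketch: cpu_hotplug_disable()/cpu_hotplug_enable() nest via the
 * cpu_hotplug_disabled counter, so a caller can hold hotplug off across a
 * critical section (the helper name is hypothetical):
 *
 *	cpu_hotplug_disable();
 *	do_work_that_must_not_race_with_hotplug();
 *	cpu_hotplug_enable();
 *
 * While the count is non-zero, cpu_up() and cpu_down() fail with -EBUSY.
 */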
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}
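
/*
 * Usage sketch: a subsystem that wants CPU state-change callbacks hangs a
 * notifier_block on the chain. All "my_*" names are hypothetical:
 *
 *	static int my_cpu_callback(struct notifier_block *nfb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			my_setup(cpu);
 *			break;
 *		case CPU_DEAD:
 *			my_teardown(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call	= my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_notifier);
 *
 * See smpboot_thread_call() below for an in-tree instance of this pattern.
 */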
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}
/* Notifier wrappers for transitioning to state machine */

static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
		       __func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}
static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	BUG_ON(!cpu_online(cpu));
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reads so that we do not warn about a task
		 * which was running on this cpu in the past but has
		 * just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}
static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}
static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	}
	return err;
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING, cpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	int err;

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		irq_unlock_sparse();
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}
static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}

#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#endif
#ifdef CONFIG_HOTPLUG_CPU
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = CPUHP_OFFLINE;
	for (; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;

	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}
int cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */
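
/*
 * Usage sketch: offlining a CPU from kernel code is a single call; the
 * hotplug-disabled and last-online-CPU cases surface as errors:
 *
 *	int err = cpu_down(3);
 *	if (err)
 *		pr_err("failed to offline CPU3: %d\n", err);
 *
 * Userspace reaches the same path by writing 0 to
 * /sys/devices/system/cpu/cpuN/online.
 */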
/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int prev_state, ret = 0;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/* Let it fail before we try to bring the cpu up */
	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = CPUHP_ONLINE;
	while (st->state < st->target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_bp_states + st->state;
		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
out:
	cpu_hotplug_done();
	return ret;
}
int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
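
/*
 * Usage sketch: onlining is the mirror image and walks the state table
 * upwards; -EBUSY again signals that hotplug is currently disabled:
 *
 *	int err = cpu_up(3);
 *	if (err == -EBUSY)
 *		pr_info("CPU hotplug is temporarily disabled\n");
 *
 * Note that try_online_node() above brings up the CPU's memory node before
 * the CPU itself is started.
 */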
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use the CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}
void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * disabling cpu hotplug to avoid a cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
}

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup		= NULL,
		.teardown		= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:create",
		.startup		= smpboot_create_threads,
		.teardown		= NULL,
	},
	[CPUHP_NOTIFY_PREPARE] = {
		.name			= "notify:prepare",
		.startup		= notify_prepare,
		.teardown		= notify_dead,
	},
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup		= bringup_cpu,
		.teardown		= takedown_cpu,
	},
	[CPUHP_NOTIFY_ONLINE] = {
		.name			= "notify:online",
		.startup		= notify_online,
		.teardown		= notify_down_prepare,
	},
#endif
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup		= NULL,
		.teardown		= NULL,
	},
};
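
/*
 * Note: the array is indexed by enum cpuhp_state, so _cpu_up() runs the
 * ->startup callbacks from low state to high and _cpu_down() runs the
 * ->teardown callbacks from high state to low, with undo_cpu_up()/
 * undo_cpu_down() replaying the opposite direction on failure.
 */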
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
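
/*
 * Usage sketch: cpumask_of(cpu) resolves to a constant pointer into a row
 * of this table, so a single-CPU mask never has to be built at run time.
 * That is what lets takedown_cpu() above write:
 *
 *	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
 *
 * without allocating or initializing a cpumask of its own.
 */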
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc. for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}
;