/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

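/*
 * Typical lockless-registration usage (a sketch only; foo_cpu_notifier and
 * the foo_* helpers are hypothetical, not part of this file):
 *
 *	cpu_notifier_register_begin();
 *
 *	for_each_online_cpu(cpu)
 *		foo_init_cpu(cpu);
 *
 *	__register_cpu_notifier(&foo_cpu_notifier);
 *
 *	cpu_notifier_register_done();
 *
 * Note: cpu_notifier_register_begin/done() are aliases of
 * cpu_maps_update_begin/done() (see <linux/cpu.h>), which is why the
 * EXPORT_SYMBOL()s above reference those names.
 */
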
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return true;
        if (!mutex_trylock(&cpu_hotplug.lock))
                return false;
        cpuhp_lock_acquire_tryread();
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
        return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);

        if (WARN_ON(!cpu_hotplug.refcount))
                cpu_hotplug.refcount++; /* try to fix things up */

        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

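/*
 * Reader-side usage sketch (do_something_on() is a hypothetical helper):
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something_on(cpu);
 *	put_online_cpus();
 *
 * Between get_online_cpus() and put_online_cpus() no CPU can be taken
 * offline. try_get_online_cpus() is the non-blocking variant for callers
 * that must not block waiting for an ongoing hotplug operation.
 */
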
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        cpuhp_lock_acquire();
        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}

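/*
 * Writer-side calling convention, as used by _cpu_up()/_cpu_down() below
 * (a sketch, not a public API for other callers):
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	... update cpu_online_mask, run the notifiers ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */
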
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

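/*
 * A minimal hotplug callback, for illustration (foo_cpu_callback and the
 * foo_* helpers are hypothetical):
 *
 *	static int foo_cpu_callback(struct notifier_block *nfb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			if (foo_prepare_cpu(cpu))
 *				return notifier_from_errno(-ENOMEM);
 *			break;
 *		case CPU_DEAD:
 *			foo_cleanup_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 * Masking out CPU_TASKS_FROZEN lets one callback handle both the regular
 * and the suspend/resume (frozen) variants of each event.
 */
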
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock_irq(&tasklist_lock);
        do_each_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reading so that we do not warn about a task
                 * which was running on this cpu in the past and has just
                 * been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        } while_each_thread(g, p);
        read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        /* Park the stopper thread */
        kthread_park(current);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
                goto out_release;
        }

        /*
         * By now we've cleared cpu_active_mask, wait for all preempt-disabled
         * and RCU users of this state to go away such that all new such users
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
         * not imply sync_sched(), so explicitly call both.
         *
         * Do the sync before parking the smpboot threads to take care of the
         * RCU boost case.
         */
#ifdef CONFIG_PREEMPT
        synchronize_sched();
#endif
        synchronize_rcu();

        smpboot_park_threads(cpu);

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone. Can't complain. */
                smpboot_unpark_threads(cpu);
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone. Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
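
/*
 * cpu_down() backs the sysfs "online" attribute; from userspace a CPU is
 * typically taken down with (illustrative shell command):
 *
 *	echo 0 > /sys/devices/system/cpu/cpu3/online
 */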
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        cpu_hotplug_begin();

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                pr_warn("%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Wake the per cpu threads */
        smpboot_unpark_threads(cpu);

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();

        return ret;
}

int cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
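
/*
 * Besides the sysfs path (echo 1 > /sys/devices/system/cpu/cpuN/online),
 * cpu_up() is how the secondary CPUs come online at boot; a simplified
 * sketch of the smp_init() loop in kernel/smp.c:
 *
 *	for_each_present_cpu(cpu) {
 *		if (!cpu_online(cpu))
 *			cpu_up(cpu);
 *	}
 *
 * (The real loop also honors the maxcpus= limit.)
 */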

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with userspace trying to use CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                pr_err("Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

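/*
 * These two are paired by the suspend/hibernation core: it calls
 * disable_nonboot_cpus() after freezing tasks and enable_nonboot_cpus()
 * on the way back up, so frozen_cpus records exactly which CPUs to
 * revive (see e.g. suspend_enter() in kernel/power/suspend.c).
 */
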
static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
         * disabling cpu hotplug to avoid a cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each nr < NR_CPUS, the NR_CPUS-bit value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
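
/*
 * How cpumask_of() consumes this table: a sketch of get_cpu_mask(),
 * paraphrased from <linux/cpumask.h>:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Row 1 + (cpu % BITS_PER_LONG) has exactly bit (cpu % BITS_PER_LONG) set
 * in its first word; stepping the pointer back by cpu / BITS_PER_LONG
 * words "backs into" the trailing zero words of the preceding rows, so
 * the result reads as a full cpumask with only bit 'cpu' set.
 */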

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online) {
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        } else {
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
        }
}
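
/*
 * Note the asymmetry above: bringing a CPU online marks it active as
 * well, but taking it offline leaves the active bit untouched. The
 * scheduler clears it separately (via set_cpu_active() below) early in
 * the teardown path, so new work stops being placed on the CPU before
 * it actually goes away.
 */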

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}