/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

/* This protects CPUs going up and down... */
static DEFINE_MUTEX(cpu_add_remove_lock);
static DEFINE_MUTEX(cpu_bitmask_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
static struct task_struct *recursive;
static int recursive_depth;

void lock_cpu_hotplug(void)
{
        struct task_struct *tsk = current;

        if (tsk == recursive) {
                static int warnings = 10;
                if (warnings) {
                        printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
                        WARN_ON(1);
                        warnings--;
                }
                recursive_depth++;
                return;
        }
        mutex_lock(&cpu_bitmask_lock);
        recursive = tsk;
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug);

void unlock_cpu_hotplug(void)
{
        WARN_ON(recursive != current);
        if (recursive_depth) {
                recursive_depth--;
                return;
        }
        recursive = NULL;
        mutex_unlock(&cpu_bitmask_lock);
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);

#endif  /* CONFIG_HOTPLUG_CPU */
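
/*
 * Usage sketch (illustration only, not part of this file): a reader that
 * needs the set of online CPUs to stay stable takes the hotplug lock
 * around its walk.  do_something_on() is a made-up helper.
 *
 *      int cpu, total = 0;
 *
 *      lock_cpu_hotplug();
 *      for_each_online_cpu(cpu)
 *              total += do_something_on(cpu);  // cpu cannot go away here
 *      unlock_cpu_hotplug();
 *
 * A second lock_cpu_hotplug() on the same task only bumps
 * recursive_depth (after the warning above); any other task blocks on
 * cpu_bitmask_lock until the holder drops it.
 */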

/* Need to know about CPUs going up/down? */
int __cpuinit register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        mutex_lock(&cpu_add_remove_lock);
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        mutex_unlock(&cpu_add_remove_lock);
        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
        mutex_lock(&cpu_add_remove_lock);
        raw_notifier_chain_unregister(&cpu_chain, nb);
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
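
/*
 * Usage sketch (hypothetical caller): a subsystem with per-cpu state
 * registers a notifier_block and reacts to the CPU_* events posted on
 * cpu_chain by the code below.  my_cpu_callback/my_cpu_notifier are
 * invented names.
 *
 *      static int my_cpu_callback(struct notifier_block *nfb,
 *                                 unsigned long action, void *hcpu)
 *      {
 *              unsigned int cpu = (unsigned long)hcpu;
 *
 *              switch (action) {
 *              case CPU_UP_PREPARE:
 *                      // allocate state for cpu; NOTIFY_BAD vetoes the bring-up
 *                      break;
 *              case CPU_UP_CANCELED:
 *              case CPU_DEAD:
 *                      // tear the state down again
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_cpu_notifier = {
 *              .notifier_call = my_cpu_callback,
 *      };
 *
 *      register_cpu_notifier(&my_cpu_notifier);
 */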

static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %lx)\n",
                                p->comm, p->pid, cpu, p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

/* Take this CPU down. */
static int take_cpu_down(void *unused)
{
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* Force idle task to run as soon as we yield: it should
           immediately notice cpu is offline and die quickly. */
        sched_idle_next();
        return 0;
}
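
/*
 * How take_cpu_down() gets run (a sketch of the contract, mirroring its
 * one caller _cpu_down() below): __stop_machine_run() parks every other
 * CPU with interrupts disabled and runs the callback on the dying CPU,
 * so __cpu_disable() can reroute interrupts without racing anything.
 * The returned kthread carries the callback's exit code:
 *
 *      struct task_struct *p;
 *
 *      p = __stop_machine_run(take_cpu_down, NULL, cpu);
 *      if (IS_ERR(p))
 *              return PTR_ERR(p);      // the machine could not be stopped
 *      // ...
 *      err = kthread_stop(p);          // collects take_cpu_down()'s result
 */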

/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu)
{
        int err;
        struct task_struct *p;
        cpumask_t old_allowed, tmp;

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
                                                (void *)(long)cpu);
        if (err == NOTIFY_BAD) {
                printk("%s: attempt to take down CPU %u failed\n",
                                __FUNCTION__, cpu);
                return -EINVAL;
        }

        /* Ensure that we are not runnable on dying cpu */
        old_allowed = current->cpus_allowed;
        tmp = CPU_MASK_ALL;
        cpu_clear(cpu, tmp);
        set_cpus_allowed(current, tmp);

        mutex_lock(&cpu_bitmask_lock);
        p = __stop_machine_run(take_cpu_down, NULL, cpu);
        mutex_unlock(&cpu_bitmask_lock);

        if (IS_ERR(p) || cpu_online(cpu)) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
                                (void *)(long)cpu) == NOTIFY_BAD)
                        BUG();

                if (IS_ERR(p)) {
                        err = PTR_ERR(p);
                        goto out_allowed;
                }
                goto out_thread;
        }

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* Move it here so it can run. */
        kthread_bind(p, get_cpu());
        put_cpu();

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD,
                        (void *)(long)cpu) == NOTIFY_BAD)
                BUG();

        check_for_tasks(cpu);

out_thread:
        err = kthread_stop(p);
out_allowed:
        set_cpus_allowed(current, old_allowed);
        return err;
}

int cpu_down(unsigned int cpu)
{
        int err = 0;

        mutex_lock(&cpu_add_remove_lock);
        if (cpu_hotplug_disabled)
                err = -EBUSY;
        else
                err = _cpu_down(cpu);

        mutex_unlock(&cpu_add_remove_lock);
        return err;
}
#endif /*CONFIG_HOTPLUG_CPU*/
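
/*
 * Caller sketch (hedged): the main user in this era is the sysfs
 * "online" attribute (drivers/base/cpu.c), so
 *
 *      echo 0 > /sys/devices/system/cpu/cpu1/online
 *
 * boils down to roughly:
 *
 *      int err = cpu_down(1);
 *      if (err)
 *              printk(KERN_ERR "failed to offline CPU1: %d\n", err);
 */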

/* Requires cpu_add_remove_lock to be held */
static int __devinit _cpu_up(unsigned int cpu)
{
        int ret;
        void *hcpu = (void *)(long)cpu;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        ret = raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
        if (ret == NOTIFY_BAD) {
                printk("%s: attempt to bring up CPU %u failed\n",
                                __FUNCTION__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        mutex_lock(&cpu_bitmask_lock);
        ret = __cpu_up(cpu);
        mutex_unlock(&cpu_bitmask_lock);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Now call notifier in preparation. */
        raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);

out_notify:
        if (ret != 0)
                raw_notifier_call_chain(&cpu_chain,
                                CPU_UP_CANCELED, hcpu);

        return ret;
}

int __devinit cpu_up(unsigned int cpu)
{
        int err = 0;

        mutex_lock(&cpu_add_remove_lock);
        if (cpu_hotplug_disabled)
                err = -EBUSY;
        else
                err = _cpu_up(cpu);

        mutex_unlock(&cpu_add_remove_lock);
        return err;
}
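
/*
 * Caller sketch (hedged), the mirror image of cpu_down() above:
 *
 *      int err = cpu_up(1);    // e.g. echo 1 > /sys/devices/system/cpu/cpu1/online
 *      if (err)
 *              printk(KERN_ERR "failed to online CPU1: %d\n", err);
 *
 * Both entry points return -EBUSY while cpu_hotplug_disabled is set,
 * i.e. between disable_nonboot_cpus() and enable_nonboot_cpus() below.
 */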

#ifdef CONFIG_SUSPEND_SMP
static cpumask_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        mutex_lock(&cpu_add_remove_lock);
        first_cpu = first_cpu(cpu_present_map);
        if (!cpu_online(first_cpu)) {
                error = _cpu_up(first_cpu);
                if (error) {
                        printk(KERN_ERR "Could not bring CPU%d up.\n",
                                first_cpu);
                        goto out;
                }
        }

        /* We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpus_clear(frozen_cpus);
        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu);
                if (!error) {
                        cpu_set(cpu, frozen_cpus);
                        printk("CPU%d is down\n", cpu);
                } else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }
        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
out:
        mutex_unlock(&cpu_add_remove_lock);
        return error;
}

void enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        mutex_lock(&cpu_add_remove_lock);
        cpu_hotplug_disabled = 0;
        mutex_unlock(&cpu_add_remove_lock);

        printk("Enabling non-boot CPUs ...\n");
        for_each_cpu_mask(cpu, frozen_cpus) {
                error = cpu_up(cpu);
                if (!error) {
                        printk("CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n",
                        cpu, error);
        }
        cpus_clear(frozen_cpus);
}
#endif
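
/*
 * Suspend-path sketch (hedged; the real call sites live in the PM core
 * and may differ in detail): everything but the boot CPU is offlined
 * before the low-power transition and brought back afterwards:
 *
 *      error = disable_nonboot_cpus();
 *      if (!error) {
 *              // enter the sleep state on the one remaining CPU
 *              enable_nonboot_cpus();
 *      }
 */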