Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * x86 SMP booting functions | |
3 | * | |
4 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | |
5 | * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> | |
6 | * Copyright 2001 Andi Kleen, SuSE Labs. | |
7 | * | |
8 | * Much of the core SMP work is based on previous work by Thomas Radke, to | |
9 | * whom a great many thanks are extended. | |
10 | * | |
11 | * Thanks to Intel for making available several different Pentium, | |
12 | * Pentium Pro and Pentium-II/Xeon MP machines. | |
13 | * Original development of Linux SMP code supported by Caldera. | |
14 | * | |
a8ab26fe | 15 | * This code is released under the GNU General Public License version 2 |
1da177e4 LT |
16 | * |
17 | * Fixes | |
18 | * Felix Koop : NR_CPUS used properly | |
19 | * Jose Renau : Handle single CPU case. | |
20 | * Alan Cox : By repeated request 8) - Total BogoMIP report. | |
21 | * Greg Wright : Fix for kernel stacks panic. | |
22 | * Erich Boleyn : MP v1.4 and additional changes. | |
23 | * Matthias Sattler : Changes for 2.1 kernel map. | |
24 | * Michel Lespinasse : Changes for 2.1 kernel map. | |
25 | * Michael Chastain : Change trampoline.S to gnu as. | |
26 | * Alan Cox : Dumb bug: 'B' step PPro's are fine | |
27 | * Ingo Molnar : Added APIC timers, based on code | |
28 | * from Jose Renau | |
29 | * Ingo Molnar : various cleanups and rewrites | |
30 | * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. | |
31 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs | |
32 | * Andi Kleen : Changed for SMP boot into long mode. | |
a8ab26fe AK |
33 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. |
34 | * Andi Kleen : Converted to new state machine. | |
35 | * Various cleanups. | |
36 | * Probably mostly hotplug CPU ready now. | |
76e4f660 | 37 | * Ashok Raj : CPU hotplug support |
1da177e4 LT |
38 | */ |
39 | ||
a8ab26fe | 40 | |
1da177e4 LT |
41 | #include <linux/init.h> |
42 | ||
43 | #include <linux/mm.h> | |
44 | #include <linux/kernel_stat.h> | |
1da177e4 LT |
45 | #include <linux/bootmem.h> |
46 | #include <linux/thread_info.h> | |
47 | #include <linux/module.h> | |
1da177e4 LT |
48 | #include <linux/delay.h> |
49 | #include <linux/mc146818rtc.h> | |
a3bc0dbc | 50 | #include <linux/smp.h> |
1eeb66a1 | 51 | #include <linux/kdebug.h> |
a3bc0dbc | 52 | |
1da177e4 LT |
53 | #include <asm/mtrr.h> |
54 | #include <asm/pgalloc.h> | |
55 | #include <asm/desc.h> | |
1da177e4 LT |
56 | #include <asm/tlbflush.h> |
57 | #include <asm/proto.h> | |
75152114 | 58 | #include <asm/nmi.h> |
9cdd304b AV |
59 | #include <asm/irq.h> |
60 | #include <asm/hw_irq.h> | |
488fc08d | 61 | #include <asm/numa.h> |
1da177e4 LT |
62 | |
63 | /* Number of siblings per CPU package */ | |
64 | int smp_num_siblings = 1; | |
2ee60e17 | 65 | EXPORT_SYMBOL(smp_num_siblings); |
1da177e4 | 66 | |
1e9f28fa | 67 | /* Last level cache ID of each logical CPU */ |
b6278470 | 68 | DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID; |
1e9f28fa | 69 | |
1da177e4 | 70 | /* Bitmask of currently online CPUs */ |
6c231b7b | 71 | cpumask_t cpu_online_map __read_mostly; |
1da177e4 | 72 | |
a8ab26fe AK |
73 | EXPORT_SYMBOL(cpu_online_map); |
74 | ||
75 | /* | |
76 | * Private maps to synchronize booting between AP and BP. | |
77 | * Probably not needed anymore, but it makes for easier debugging. -AK | |
78 | */ | |
1da177e4 LT |
79 | cpumask_t cpu_callin_map; |
80 | cpumask_t cpu_callout_map; | |
2ee60e17 | 81 | EXPORT_SYMBOL(cpu_callout_map); |
a8ab26fe AK |
82 | |
83 | cpumask_t cpu_possible_map; | |
84 | EXPORT_SYMBOL(cpu_possible_map); | |
1da177e4 LT |
85 | |
86 | /* Per CPU bogomips and other parameters */ | |
87 | struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; | |
2ee60e17 | 88 | EXPORT_SYMBOL(cpu_data); |
1da177e4 | 89 | |
a8ab26fe AK |
90 | /* Set when the idlers are all forked */ |
91 | int smp_threads_ready; | |
92 | ||
94605eff | 93 | /* representing HT siblings of each logical CPU */ |
d5a7430d MT |
94 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); |
95 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | |
94605eff SS |
96 | |
97 | /* representing HT and core siblings of each logical CPU */ | |
08357611 MT |
98 | DEFINE_PER_CPU(cpumask_t, cpu_core_map); |
99 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); | |
1da177e4 LT |
100 | |
101 | /* | |
102 | * Trampoline 80x86 program as an array. | |
103 | */ | |
104 | ||
121d7bf5 JB |
105 | extern const unsigned char trampoline_data[]; |
106 | extern const unsigned char trampoline_end[]; | |
1da177e4 | 107 | |
76e4f660 AR |
108 | /* State of each CPU */ |
109 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | |
110 | ||
111 | /* | |
112 | * Store all idle threads; these can be reused instead of creating | |
113 | * new threads. This also avoids complicated thread-destroy functionality | |
114 | * for idle threads. | |
115 | */ | |
116 | struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; | |
117 | ||
118 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | |
119 | #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p)) | |
120 | ||
1da177e4 LT |
121 | /* |
122 | * Currently trivial. Write the real->protected mode | |
123 | * bootstrap into the page concerned. The caller | |
124 | * has made sure it's suitably aligned. | |
125 | */ | |
126 | ||
a8ab26fe | 127 | static unsigned long __cpuinit setup_trampoline(void) |
1da177e4 LT |
128 | { |
129 | void *tramp = __va(SMP_TRAMPOLINE_BASE); | |
130 | memcpy(tramp, trampoline_data, trampoline_end - trampoline_data); | |
131 | return virt_to_phys(tramp); | |
132 | } | |
133 | ||
134 | /* | |
135 | * The bootstrap kernel entry code has set these up. Save them for | |
136 | * a given CPU | |
137 | */ | |
138 | ||
a8ab26fe | 139 | static void __cpuinit smp_store_cpu_info(int id) |
1da177e4 LT |
140 | { |
141 | struct cpuinfo_x86 *c = cpu_data + id; | |
142 | ||
143 | *c = boot_cpu_data; | |
144 | identify_cpu(c); | |
dda50e71 | 145 | print_cpu_info(c); |
1da177e4 LT |
146 | } |
147 | ||
a8ab26fe | 148 | static atomic_t init_deasserted __cpuinitdata; |
1da177e4 | 149 | |
a8ab26fe AK |
150 | /* |
151 | * Report back to the Boot Processor. | |
152 | * Running on AP. | |
153 | */ | |
154 | void __cpuinit smp_callin(void) | |
1da177e4 LT |
155 | { |
156 | int cpuid, phys_id; | |
157 | unsigned long timeout; | |
158 | ||
159 | /* | |
160 | * If woken up by an INIT in an 82489DX configuration | |
161 | * we may get here before an INIT-deassert IPI reaches | |
162 | * our local APIC. We have to wait for the IPI or we'll | |
163 | * lock up on an APIC access. | |
164 | */ | |
a8ab26fe AK |
165 | while (!atomic_read(&init_deasserted)) |
166 | cpu_relax(); | |
1da177e4 LT |
167 | |
168 | /* | |
169 | * (This works even if the APIC is not enabled.) | |
170 | */ | |
171 | phys_id = GET_APIC_ID(apic_read(APIC_ID)); | |
172 | cpuid = smp_processor_id(); | |
173 | if (cpu_isset(cpuid, cpu_callin_map)) { | |
174 | panic("smp_callin: phys CPU#%d, CPU#%d already present??\n", | |
175 | phys_id, cpuid); | |
176 | } | |
177 | Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id); | |
178 | ||
179 | /* | |
180 | * STARTUP IPIs are fragile beasts as they might sometimes | |
181 | * trigger some glue motherboard logic. Complete APIC bus | |
182 | * silence for 1 second; this overestimates the time the | |
183 | * boot CPU spends sending the up to 2 STARTUP IPIs | |
184 | * by a factor of two. This should be enough. | |
185 | */ | |
186 | ||
187 | /* | |
188 | * Waiting 2s total for startup (udelay is not yet working) | |
189 | */ | |
190 | timeout = jiffies + 2*HZ; | |
191 | while (time_before(jiffies, timeout)) { | |
192 | /* | |
193 | * Has the boot CPU finished its STARTUP sequence? | |
194 | */ | |
195 | if (cpu_isset(cpuid, cpu_callout_map)) | |
196 | break; | |
a8ab26fe | 197 | cpu_relax(); |
1da177e4 LT |
198 | } |
199 | ||
200 | if (!time_before(jiffies, timeout)) { | |
201 | panic("smp_callin: CPU%d started up but did not get a callout!\n", | |
202 | cpuid); | |
203 | } | |
204 | ||
205 | /* | |
206 | * the boot CPU has finished the init stage and is spinning | |
207 | * on callin_map until we finish. We are free to set up this | |
208 | * CPU, first the APIC. (this is probably redundant on most | |
209 | * boards) | |
210 | */ | |
211 | ||
212 | Dprintk("CALLIN, before setup_local_APIC().\n"); | |
213 | setup_local_APIC(); | |
214 | ||
1da177e4 LT |
215 | /* |
216 | * Get our bogomips. | |
b4452218 AK |
217 | * |
218 | * Need to enable IRQs because calibration can take a while, | |
219 | * and otherwise the NMI watchdog might kill us. | |
1da177e4 | 220 | */ |
b4452218 | 221 | local_irq_enable(); |
1da177e4 | 222 | calibrate_delay(); |
b4452218 | 223 | local_irq_disable(); |
1da177e4 LT |
224 | Dprintk("Stack at about %p\n",&cpuid); |
225 | ||
1da177e4 LT |
226 | /* |
227 | * Save our processor parameters | |
228 | */ | |
229 | smp_store_cpu_info(cpuid); | |
230 | ||
1da177e4 LT |
231 | /* |
232 | * Allow the master to continue. | |
233 | */ | |
234 | cpu_set(cpuid, cpu_callin_map); | |
1da177e4 LT |
235 | } |
236 | ||
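The callin/callout handshake above is just two shared bitmasks: do_boot_cpu() sets the AP's bit in cpu_callout_map to let it proceed, and smp_callin() sets the AP's bit in cpu_callin_map to report back. The sketch below is a minimal userspace model of that handshake using C11 atomics and pthreads; the names mirror the kernel code, but the threading plumbing is purely illustrative and is not part of the kernel source.

```c
/*
 * Simplified userspace model of the callout/callin handshake used by
 * smp_callin() and do_boot_cpu().  The callout/callin names follow the
 * kernel code above; the pthread plumbing is purely illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdatomic.h>
#include <unistd.h>

static atomic_ulong cpu_callout_map;   /* set by the BSP, one bit per AP */
static atomic_ulong cpu_callin_map;    /* set by each AP once it is up */

static void *ap_thread(void *arg)
{
	int cpu = (int)(long)arg;

	/* AP side: wait for the BSP's "callout" before doing anything */
	while (!(atomic_load(&cpu_callout_map) & (1UL << cpu)))
		;                               /* cpu_relax() in the kernel */

	/* ... local APIC setup, calibrate_delay(), smp_store_cpu_info() ... */

	/* report back: the equivalent of cpu_set(cpuid, cpu_callin_map) */
	atomic_fetch_or(&cpu_callin_map, 1UL << cpu);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int cpu = 1;

	pthread_create(&t, NULL, ap_thread, (void *)(long)cpu);

	/* BSP side: allow the AP to start initializing ("callout") */
	atomic_fetch_or(&cpu_callout_map, 1UL << cpu);

	/* then wait for the AP to check in ("callin") */
	while (!(atomic_load(&cpu_callin_map) & (1UL << cpu)))
		usleep(100);

	printf("CPU %d has booted (model)\n", cpu);
	pthread_join(t, NULL);
	return 0;
}
```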
1e9f28fa SS |
237 | /* maps the cpu to the sched domain representing multi-core */ |
238 | cpumask_t cpu_coregroup_map(int cpu) | |
239 | { | |
240 | struct cpuinfo_x86 *c = cpu_data + cpu; | |
241 | /* | |
242 | * For performance, we return the last-level-cache shared map. |
5c45bf27 | 243 | * For power savings, we return cpu_core_map. |
1e9f28fa | 244 | */ |
5c45bf27 | 245 | if (sched_mc_power_savings || sched_smt_power_savings) |
08357611 | 246 | return per_cpu(cpu_core_map, cpu); |
5c45bf27 SS |
247 | else |
248 | return c->llc_shared_map; | |
1e9f28fa SS |
249 | } |
250 | ||
94605eff SS |
251 | /* representing cpus for which sibling maps can be computed */ |
252 | static cpumask_t cpu_sibling_setup_map; | |
253 | ||
cb0cd8d4 AR |
254 | static inline void set_cpu_sibling_map(int cpu) |
255 | { | |
256 | int i; | |
94605eff SS |
257 | struct cpuinfo_x86 *c = cpu_data; |
258 | ||
259 | cpu_set(cpu, cpu_sibling_setup_map); | |
cb0cd8d4 AR |
260 | |
261 | if (smp_num_siblings > 1) { | |
94605eff | 262 | for_each_cpu_mask(i, cpu_sibling_setup_map) { |
f3fa8ebc RS |
263 | if (c[cpu].phys_proc_id == c[i].phys_proc_id && |
264 | c[cpu].cpu_core_id == c[i].cpu_core_id) { | |
d5a7430d MT |
265 | cpu_set(i, per_cpu(cpu_sibling_map, cpu)); |
266 | cpu_set(cpu, per_cpu(cpu_sibling_map, i)); | |
08357611 MT |
267 | cpu_set(i, per_cpu(cpu_core_map, cpu)); |
268 | cpu_set(cpu, per_cpu(cpu_core_map, i)); | |
1e9f28fa SS |
269 | cpu_set(i, c[cpu].llc_shared_map); |
270 | cpu_set(cpu, c[i].llc_shared_map); | |
cb0cd8d4 AR |
271 | } |
272 | } | |
273 | } else { | |
d5a7430d | 274 | cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); |
cb0cd8d4 AR |
275 | } |
276 | ||
1e9f28fa SS |
277 | cpu_set(cpu, c[cpu].llc_shared_map); |
278 | ||
94605eff | 279 | if (current_cpu_data.x86_max_cores == 1) { |
d5a7430d | 280 | per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); |
94605eff SS |
281 | c[cpu].booted_cores = 1; |
282 | return; | |
283 | } | |
284 | ||
285 | for_each_cpu_mask(i, cpu_sibling_setup_map) { | |
b6278470 MT |
286 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && |
287 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | |
1e9f28fa SS |
288 | cpu_set(i, c[cpu].llc_shared_map); |
289 | cpu_set(cpu, c[i].llc_shared_map); | |
290 | } | |
f3fa8ebc | 291 | if (c[cpu].phys_proc_id == c[i].phys_proc_id) { |
08357611 MT |
292 | cpu_set(i, per_cpu(cpu_core_map, cpu)); |
293 | cpu_set(cpu, per_cpu(cpu_core_map, i)); | |
94605eff SS |
294 | /* |
295 | * Does this new cpu bring up a new core? | |
296 | */ | |
d5a7430d | 297 | if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { |
94605eff SS |
298 | /* |
299 | * for each core in package, increment | |
300 | * the booted_cores for this new cpu | |
301 | */ | |
d5a7430d | 302 | if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) |
94605eff SS |
303 | c[cpu].booted_cores++; |
304 | /* | |
305 | * increment the core count for all | |
306 | * the other cpus in this package | |
307 | */ | |
308 | if (i != cpu) | |
309 | c[i].booted_cores++; | |
310 | } else if (i != cpu && !c[cpu].booted_cores) | |
311 | c[cpu].booted_cores = c[i].booted_cores; | |
312 | } | |
cb0cd8d4 AR |
313 | } |
314 | } | |
315 | ||
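set_cpu_sibling_map() above derives two masks per logical CPU from the identifiers filled in by identify_cpu(): CPUs sharing phys_proc_id land in each other's cpu_core_map, and CPUs that also share cpu_core_id land in each other's cpu_sibling_map. The userspace sketch below models only that pairing rule for a hypothetical 1-package, 2-core, 2-thread topology; the id values are invented for the example, and the llc_shared_map and booted_cores bookkeeping is left out.

```c
/*
 * Userspace model of the sibling/core map construction above for a
 * hypothetical 1-socket, 2-core, 2-thread topology (4 logical CPUs).
 * The phys_proc_id/cpu_core_id values are made up for the example.
 */
#include <stdio.h>

#define NCPUS 4

static const int phys_proc_id[NCPUS] = { 0, 0, 0, 0 };
static const int cpu_core_id[NCPUS]  = { 0, 0, 1, 1 };

int main(void)
{
	unsigned sibling_map[NCPUS] = { 0 }, core_map[NCPUS] = { 0 };

	for (int cpu = 0; cpu < NCPUS; cpu++) {
		for (int i = 0; i < NCPUS; i++) {
			if (phys_proc_id[cpu] != phys_proc_id[i])
				continue;
			core_map[cpu] |= 1u << i;        /* same package */
			if (cpu_core_id[cpu] == cpu_core_id[i])
				sibling_map[cpu] |= 1u << i; /* same core (HT) */
		}
		printf("cpu%d: siblings=0x%x core=0x%x\n",
		       cpu, sibling_map[cpu], core_map[cpu]);
	}
	return 0;
}
```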
1da177e4 | 316 | /* |
a8ab26fe | 317 | * Setup code on the secondary processor (after coming out of the trampoline) |
1da177e4 | 318 | */ |
a8ab26fe | 319 | void __cpuinit start_secondary(void) |
1da177e4 LT |
320 | { |
321 | /* | |
322 | * Don't put anything before smp_callin(); SMP | |
323 | * booting is so fragile that we want to limit the | |
324 | * things done here to the bare minimum. | |
325 | */ | |
326 | cpu_init(); | |
5bfb5d69 | 327 | preempt_disable(); |
1da177e4 LT |
328 | smp_callin(); |
329 | ||
330 | /* otherwise gcc will move up the smp_processor_id before the cpu_init */ | |
331 | barrier(); | |
332 | ||
95492e46 IM |
333 | /* |
334 | * Check TSC sync first: | |
335 | */ | |
336 | check_tsc_sync_target(); | |
337 | ||
1da177e4 LT |
338 | if (nmi_watchdog == NMI_IO_APIC) { |
339 | disable_8259A_irq(0); | |
340 | enable_NMI_through_LVT0(NULL); | |
341 | enable_8259A_irq(0); | |
342 | } | |
343 | ||
cb0cd8d4 AR |
344 | /* |
345 | * The sibling maps must be set before turning the online map on for | |
346 | * this cpu | |
347 | */ | |
348 | set_cpu_sibling_map(smp_processor_id()); | |
349 | ||
884d9e40 AR |
350 | /* |
351 | * We need to hold call_lock, so there is no inconsistency | |
352 | * between the time smp_call_function() determines the number of | |
353 | * IPI recipients and the time when the determination is made | |
354 | * for which cpus receive the IPI in genapic_flat.c. Holding this | |
355 | * lock keeps this cpu out of a currently in-progress | |
356 | * smp_call_function(). | |
357 | */ | |
358 | lock_ipi_call_lock(); | |
70a0a535 | 359 | spin_lock(&vector_lock); |
884d9e40 | 360 | |
70a0a535 EB |
361 | /* Setup the per cpu irq handling data structures */ |
362 | __setup_vector_irq(smp_processor_id()); | |
1da177e4 | 363 | /* |
a8ab26fe | 364 | * Allow the master to continue. |
1da177e4 | 365 | */ |
1da177e4 | 366 | cpu_set(smp_processor_id(), cpu_online_map); |
884d9e40 | 367 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; |
70a0a535 | 368 | spin_unlock(&vector_lock); |
95492e46 | 369 | |
884d9e40 AR |
370 | unlock_ipi_call_lock(); |
371 | ||
3ac508be TG |
372 | setup_secondary_APIC_clock(); |
373 | ||
1da177e4 LT |
374 | cpu_idle(); |
375 | } | |
376 | ||
a8ab26fe | 377 | extern volatile unsigned long init_rsp; |
1da177e4 LT |
378 | extern void (*initial_code)(void); |
379 | ||
44456d37 | 380 | #ifdef APIC_DEBUG |
a8ab26fe | 381 | static void inquire_remote_apic(int apicid) |
1da177e4 LT |
382 | { |
383 | unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; | |
384 | char *names[] = { "ID", "VERSION", "SPIV" }; | |
3144c332 FLV |
385 | int timeout; |
386 | unsigned int status; | |
1da177e4 LT |
387 | |
388 | printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); | |
389 | ||
390 | for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) { | |
391 | printk("... APIC #%d %s: ", apicid, names[i]); | |
392 | ||
393 | /* | |
394 | * Wait for idle. | |
395 | */ | |
3144c332 FLV |
396 | status = safe_apic_wait_icr_idle(); |
397 | if (status) | |
398 | printk("a previous APIC delivery may have failed\n"); | |
1da177e4 | 399 | |
c1507eb2 AK |
400 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); |
401 | apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]); | |
1da177e4 LT |
402 | |
403 | timeout = 0; | |
404 | do { | |
405 | udelay(100); | |
406 | status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK; | |
407 | } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000); | |
408 | ||
409 | switch (status) { | |
410 | case APIC_ICR_RR_VALID: | |
411 | status = apic_read(APIC_RRR); | |
412 | printk("%08x\n", status); | |
413 | break; | |
414 | default: | |
415 | printk("failed\n"); | |
416 | } | |
417 | } | |
418 | } | |
419 | #endif | |
420 | ||
a8ab26fe AK |
421 | /* |
422 | * Kick the secondary to wake up. | |
423 | */ | |
424 | static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip) | |
1da177e4 | 425 | { |
ea8c733b FLV |
426 | unsigned long send_status, accept_status = 0; |
427 | int maxlvt, num_starts, j; | |
1da177e4 LT |
428 | |
429 | Dprintk("Asserting INIT.\n"); | |
430 | ||
431 | /* | |
432 | * Turn INIT on target chip | |
433 | */ | |
c1507eb2 | 434 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); |
1da177e4 LT |
435 | |
436 | /* | |
437 | * Send IPI | |
438 | */ | |
c1507eb2 | 439 | apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT |
1da177e4 LT |
440 | | APIC_DM_INIT); |
441 | ||
442 | Dprintk("Waiting for send to finish...\n"); | |
ea8c733b | 443 | send_status = safe_apic_wait_icr_idle(); |
1da177e4 LT |
444 | |
445 | mdelay(10); | |
446 | ||
447 | Dprintk("Deasserting INIT.\n"); | |
448 | ||
449 | /* Target chip */ | |
c1507eb2 | 450 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); |
1da177e4 LT |
451 | |
452 | /* Send IPI */ | |
c1507eb2 | 453 | apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); |
1da177e4 LT |
454 | |
455 | Dprintk("Waiting for send to finish...\n"); | |
ea8c733b | 456 | send_status = safe_apic_wait_icr_idle(); |
1da177e4 | 457 | |
f2ecfab9 | 458 | mb(); |
1da177e4 LT |
459 | atomic_set(&init_deasserted, 1); |
460 | ||
5a40b7c2 | 461 | num_starts = 2; |
1da177e4 LT |
462 | |
463 | /* | |
464 | * Run STARTUP IPI loop. | |
465 | */ | |
466 | Dprintk("#startup loops: %d.\n", num_starts); | |
467 | ||
468 | maxlvt = get_maxlvt(); | |
469 | ||
470 | for (j = 1; j <= num_starts; j++) { | |
471 | Dprintk("Sending STARTUP #%d.\n",j); | |
1da177e4 LT |
472 | apic_write(APIC_ESR, 0); |
473 | apic_read(APIC_ESR); | |
474 | Dprintk("After apic_write.\n"); | |
475 | ||
476 | /* | |
477 | * STARTUP IPI | |
478 | */ | |
479 | ||
480 | /* Target chip */ | |
c1507eb2 | 481 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); |
1da177e4 LT |
482 | |
483 | /* Boot on the stack */ | |
484 | /* Kick the secondary */ | |
c1507eb2 | 485 | apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12)); |
1da177e4 LT |
486 | |
487 | /* | |
488 | * Give the other CPU some time to accept the IPI. | |
489 | */ | |
490 | udelay(300); | |
491 | ||
492 | Dprintk("Startup point 1.\n"); | |
493 | ||
494 | Dprintk("Waiting for send to finish...\n"); | |
ea8c733b | 495 | send_status = safe_apic_wait_icr_idle(); |
1da177e4 LT |
496 | |
497 | /* | |
498 | * Give the other CPU some time to accept the IPI. | |
499 | */ | |
500 | udelay(200); | |
501 | /* | |
502 | * Due to the Pentium erratum 3AP. | |
503 | */ | |
504 | if (maxlvt > 3) { | |
1da177e4 LT |
505 | apic_write(APIC_ESR, 0); |
506 | } | |
507 | accept_status = (apic_read(APIC_ESR) & 0xEF); | |
508 | if (send_status || accept_status) | |
509 | break; | |
510 | } | |
511 | Dprintk("After Startup.\n"); | |
512 | ||
513 | if (send_status) | |
514 | printk(KERN_ERR "APIC never delivered???\n"); | |
515 | if (accept_status) | |
516 | printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); | |
517 | ||
518 | return (send_status | accept_status); | |
519 | } | |
520 | ||
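wakeup_secondary_via_INIT() follows the MP-specification INIT / INIT-deassert / STARTUP sequence. Note how the STARTUP IPI carries only a page number: the vector field is start_rip >> 12, so the trampoline must sit on a 4 KB boundary below 1 MB and the AP enters real mode at that page. The snippet below is a standalone illustration of that encoding; the 0x6000 base is an assumed example value, not necessarily what SMP_TRAMPOLINE_BASE is on a given kernel.

```c
/*
 * Illustration of how APIC_DM_STARTUP encodes the trampoline address,
 * mirroring "apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12))"
 * above.  The 0x6000 base is only an example value, not necessarily
 * the kernel's SMP_TRAMPOLINE_BASE.
 */
#include <stdio.h>

int main(void)
{
	unsigned long start_rip = 0x6000;       /* hypothetical trampoline base */
	unsigned int vector = start_rip >> 12;  /* page number below 1 MB */

	/* The AP starts in real mode at segment vector << 8, offset 0 */
	printf("STARTUP vector 0x%02x -> real-mode entry %04lx:0000 (phys 0x%lx)\n",
	       vector, start_rip >> 4, start_rip);
	return 0;
}
```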
76e4f660 | 521 | struct create_idle { |
65f27f38 | 522 | struct work_struct work; |
76e4f660 AR |
523 | struct task_struct *idle; |
524 | struct completion done; | |
525 | int cpu; | |
526 | }; | |
527 | ||
65f27f38 | 528 | void do_fork_idle(struct work_struct *work) |
76e4f660 | 529 | { |
65f27f38 DH |
530 | struct create_idle *c_idle = |
531 | container_of(work, struct create_idle, work); | |
76e4f660 AR |
532 | |
533 | c_idle->idle = fork_idle(c_idle->cpu); | |
534 | complete(&c_idle->done); | |
535 | } | |
536 | ||
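do_fork_idle() exists so that the idle task can be created from keventd context: do_boot_cpu() queues the work and blocks on the completion instead of calling fork_idle() directly. The sketch below models that queue-and-wait pattern in userspace, with a pthread standing in for keventd and a mutex/condvar pair standing in for struct completion; it is illustrative only and uses none of the kernel APIs.

```c
/*
 * Userspace model of the do_fork_idle()/wait_for_completion() pattern
 * above: the expensive fork_idle() runs in a worker thread and the
 * caller blocks on a completion until it finishes.
 */
#include <pthread.h>
#include <stdio.h>

struct create_idle {
	int cpu;
	int idle_created;            /* stands in for c_idle.idle */
	pthread_mutex_t lock;
	pthread_cond_t done;         /* stands in for struct completion */
	int complete;
};

static void *fork_idle_work(void *arg)
{
	struct create_idle *c_idle = arg;

	c_idle->idle_created = 1;            /* fork_idle(c_idle->cpu) */

	pthread_mutex_lock(&c_idle->lock);   /* complete(&c_idle->done) */
	c_idle->complete = 1;
	pthread_cond_signal(&c_idle->done);
	pthread_mutex_unlock(&c_idle->lock);
	return NULL;
}

int main(void)
{
	struct create_idle c_idle = {
		.cpu = 1,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t worker;

	pthread_create(&worker, NULL, fork_idle_work, &c_idle); /* schedule_work() */

	pthread_mutex_lock(&c_idle.lock);    /* wait_for_completion() */
	while (!c_idle.complete)
		pthread_cond_wait(&c_idle.done, &c_idle.lock);
	pthread_mutex_unlock(&c_idle.lock);

	printf("idle thread for cpu %d created: %d\n", c_idle.cpu, c_idle.idle_created);
	pthread_join(worker, NULL);
	return 0;
}
```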
a8ab26fe AK |
537 | /* |
538 | * Boot one CPU. | |
539 | */ | |
540 | static int __cpuinit do_boot_cpu(int cpu, int apicid) | |
1da177e4 | 541 | { |
1da177e4 | 542 | unsigned long boot_error; |
a8ab26fe | 543 | int timeout; |
1da177e4 | 544 | unsigned long start_rip; |
76e4f660 | 545 | struct create_idle c_idle = { |
65f27f38 | 546 | .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), |
76e4f660 | 547 | .cpu = cpu, |
f86bf9b7 | 548 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), |
76e4f660 | 549 | }; |
76e4f660 | 550 | |
c11efdf9 RT |
551 | /* allocate memory for gdts of secondary cpus. Hotplug is considered */ |
552 | if (!cpu_gdt_descr[cpu].address && | |
553 | !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) { | |
554 | printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu); | |
555 | return -1; | |
556 | } | |
557 | ||
365ba917 RT |
558 | /* Allocate node local memory for AP pdas */ |
559 | if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) { | |
560 | struct x8664_pda *newpda, *pda; | |
561 | int node = cpu_to_node(cpu); | |
562 | pda = cpu_pda(cpu); | |
563 | newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC, | |
564 | node); | |
565 | if (newpda) { | |
566 | memcpy(newpda, pda, sizeof (struct x8664_pda)); | |
567 | cpu_pda(cpu) = newpda; | |
568 | } else | |
569 | printk(KERN_ERR | |
570 | "Could not allocate node local PDA for CPU %d on node %d\n", | |
571 | cpu, node); | |
572 | } | |
573 | ||
d167a518 GH |
574 | alternatives_smp_switch(1); |
575 | ||
76e4f660 AR |
576 | c_idle.idle = get_idle_for_cpu(cpu); |
577 | ||
578 | if (c_idle.idle) { | |
579 | c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *) | |
57eafdc2 | 580 | (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1); |
76e4f660 AR |
581 | init_idle(c_idle.idle, cpu); |
582 | goto do_rest; | |
583 | } | |
584 | ||
1da177e4 | 585 | /* |
76e4f660 AR |
586 | * During the cold boot process, the keventd thread is not spun up yet. | |
587 | * When we do cpu hot-add, we create idle threads on the fly and should | |
588 | * not acquire any attributes from the calling context. Hence the clean | |
589 | * way to create kernel threads is to do that from keventd(). | |
590 | * We check current_is_keventd() because the ACPI notifier also queues | |
591 | * work to keventd(), and if the caller is already running in the | |
592 | * context of keventd(), we would end up deadlocking the keventd | |
593 | * thread. | |
1da177e4 | 594 | */ |
76e4f660 | 595 | if (!keventd_up() || current_is_keventd()) |
65f27f38 | 596 | c_idle.work.func(&c_idle.work); |
76e4f660 | 597 | else { |
65f27f38 | 598 | schedule_work(&c_idle.work); |
76e4f660 AR |
599 | wait_for_completion(&c_idle.done); |
600 | } | |
601 | ||
602 | if (IS_ERR(c_idle.idle)) { | |
a8ab26fe | 603 | printk("failed fork for CPU %d\n", cpu); |
76e4f660 | 604 | return PTR_ERR(c_idle.idle); |
a8ab26fe | 605 | } |
1da177e4 | 606 | |
76e4f660 AR |
607 | set_idle_for_cpu(cpu, c_idle.idle); |
608 | ||
609 | do_rest: | |
610 | ||
df79efde | 611 | cpu_pda(cpu)->pcurrent = c_idle.idle; |
1da177e4 LT |
612 | |
613 | start_rip = setup_trampoline(); | |
614 | ||
76e4f660 | 615 | init_rsp = c_idle.idle->thread.rsp; |
1da177e4 LT |
616 | per_cpu(init_tss,cpu).rsp0 = init_rsp; |
617 | initial_code = start_secondary; | |
e4f17c43 | 618 | clear_tsk_thread_flag(c_idle.idle, TIF_FORK); |
1da177e4 | 619 | |
de04f322 AK |
620 | printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu, |
621 | cpus_weight(cpu_present_map), | |
622 | apicid); | |
1da177e4 LT |
623 | |
624 | /* | |
625 | * This grunge runs the startup process for | |
626 | * the targeted processor. | |
627 | */ | |
628 | ||
629 | atomic_set(&init_deasserted, 0); | |
630 | ||
631 | Dprintk("Setting warm reset code and vector.\n"); | |
632 | ||
633 | CMOS_WRITE(0xa, 0xf); | |
634 | local_flush_tlb(); | |
635 | Dprintk("1.\n"); | |
636 | *((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4; | |
637 | Dprintk("2.\n"); | |
638 | *((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf; | |
639 | Dprintk("3.\n"); | |
640 | ||
641 | /* | |
642 | * Be paranoid about clearing APIC errors. | |
643 | */ | |
11a8e778 AK |
644 | apic_write(APIC_ESR, 0); |
645 | apic_read(APIC_ESR); | |
1da177e4 LT |
646 | |
647 | /* | |
648 | * Status is now clean | |
649 | */ | |
650 | boot_error = 0; | |
651 | ||
652 | /* | |
653 | * Starting actual IPI sequence... | |
654 | */ | |
a8ab26fe | 655 | boot_error = wakeup_secondary_via_INIT(apicid, start_rip); |
1da177e4 LT |
656 | |
657 | if (!boot_error) { | |
658 | /* | |
659 | * allow APs to start initializing. | |
660 | */ | |
661 | Dprintk("Before Callout %d.\n", cpu); | |
662 | cpu_set(cpu, cpu_callout_map); | |
663 | Dprintk("After Callout %d.\n", cpu); | |
664 | ||
665 | /* | |
666 | * Wait 5s total for a response | |
667 | */ | |
668 | for (timeout = 0; timeout < 50000; timeout++) { | |
669 | if (cpu_isset(cpu, cpu_callin_map)) | |
670 | break; /* It has booted */ | |
671 | udelay(100); | |
672 | } | |
673 | ||
674 | if (cpu_isset(cpu, cpu_callin_map)) { | |
675 | /* number CPUs logically, starting from 1 (BSP is 0) */ | |
1da177e4 LT |
676 | Dprintk("CPU has booted.\n"); |
677 | } else { | |
678 | boot_error = 1; | |
679 | if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE)) | |
680 | == 0xA5) | |
681 | /* trampoline started but...? */ | |
682 | printk("Stuck ??\n"); | |
683 | else | |
684 | /* trampoline code not run */ | |
685 | printk("Not responding.\n"); | |
44456d37 | 686 | #ifdef APIC_DEBUG |
1da177e4 LT |
687 | inquire_remote_apic(apicid); |
688 | #endif | |
689 | } | |
690 | } | |
691 | if (boot_error) { | |
692 | cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ | |
693 | clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */ | |
488fc08d | 694 | clear_node_cpumask(cpu); /* was set by numa_add_cpu */ |
a8ab26fe AK |
695 | cpu_clear(cpu, cpu_present_map); |
696 | cpu_clear(cpu, cpu_possible_map); | |
71fff5e6 | 697 | per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; |
a8ab26fe | 698 | return -EIO; |
1da177e4 | 699 | } |
a8ab26fe AK |
700 | |
701 | return 0; | |
1da177e4 LT |
702 | } |
703 | ||
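Besides the STARTUP IPI, do_boot_cpu() arms the legacy warm-reset path: CMOS register 0xF is set to the warm-reset shutdown code 0xA, and a real-mode far pointer to the trampoline is stored at 40:67 (physical 0x467/0x469), so a CPU that restarts through the BIOS after INIT also lands in the trampoline. The snippet below only shows how the segment:offset pair encodes the trampoline address; 0x6000 is again an assumed example base.

```c
/*
 * Model of the warm-reset vector set up above: the segment word goes to
 * physical 0x469 and the offset word to 0x467, so the BIOS jumps to the
 * trampoline on a warm reset.  0x6000 is only an example address.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned long start_rip = 0x6000;        /* hypothetical trampoline */
	uint16_t segment = start_rip >> 4;       /* stored at phys 0x469 */
	uint16_t offset  = start_rip & 0xf;      /* stored at phys 0x467 */

	printf("warm reset vector: %04x:%04x -> phys 0x%lx\n",
	       segment, offset, ((unsigned long)segment << 4) + offset);
	return 0;
}
```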
a8ab26fe AK |
704 | cycles_t cacheflush_time; |
705 | unsigned long cache_decay_ticks; | |
706 | ||
1da177e4 | 707 | /* |
a8ab26fe | 708 | * Clean up possible dangling ends... |
1da177e4 | 709 | */ |
a8ab26fe | 710 | static __cpuinit void smp_cleanup_boot(void) |
1da177e4 | 711 | { |
a8ab26fe AK |
712 | /* |
713 | * Paranoid: Set warm reset code and vector here back | |
714 | * to default values. | |
715 | */ | |
716 | CMOS_WRITE(0, 0xf); | |
1da177e4 | 717 | |
a8ab26fe AK |
718 | /* |
719 | * Reset trampoline flag | |
720 | */ | |
721 | *((volatile int *) phys_to_virt(0x467)) = 0; | |
a8ab26fe AK |
722 | } |
723 | ||
724 | /* | |
725 | * Fall back to non-SMP mode after errors. | |
726 | * | |
727 | * RED-PEN audit/test this more. I bet there is more state messed up here. | |
728 | */ | |
e6982c67 | 729 | static __init void disable_smp(void) |
a8ab26fe AK |
730 | { |
731 | cpu_present_map = cpumask_of_cpu(0); | |
732 | cpu_possible_map = cpumask_of_cpu(0); | |
733 | if (smp_found_config) | |
734 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id); | |
735 | else | |
736 | phys_cpu_present_map = physid_mask_of_physid(0); | |
d5a7430d | 737 | cpu_set(0, per_cpu(cpu_sibling_map, 0)); |
08357611 | 738 | cpu_set(0, per_cpu(cpu_core_map, 0)); |
a8ab26fe AK |
739 | } |
740 | ||
61b1b2d0 | 741 | #ifdef CONFIG_HOTPLUG_CPU |
420f8f68 AK |
742 | |
743 | int additional_cpus __initdata = -1; | |
744 | ||
61b1b2d0 AK |
745 | /* |
746 | * cpu_possible_map should be static; it cannot change as cpus | |
747 | * are onlined or offlined. The reason is that per-cpu data structures | |
748 | * are allocated by some modules at init time, and they don't expect to | |
749 | * do this dynamically on cpu arrival/departure. | |
750 | * cpu_present_map, on the other hand, can change dynamically. | |
751 | * When cpu hotplug is not compiled in, we resort to the current | |
752 | * behaviour, which is cpu_possible == cpu_present. | |
61b1b2d0 | 753 | * - Ashok Raj |
420f8f68 AK |
754 | * |
755 | * Three ways to find out the number of additional hotplug CPUs: | |
756 | * - If the BIOS specified disabled CPUs in ACPI/mptables use that. | |
420f8f68 | 757 | * - The user can override it with additional_cpus=NUM |
f62a91f6 | 758 | * - Otherwise don't reserve additional CPUs. |
420f8f68 AK |
759 | * We do this because additional CPUs waste a lot of memory. |
760 | * -AK | |
61b1b2d0 | 761 | */ |
421c7ce6 | 762 | __init void prefill_possible_map(void) |
61b1b2d0 AK |
763 | { |
764 | int i; | |
420f8f68 AK |
765 | int possible; |
766 | ||
767 | if (additional_cpus == -1) { | |
f62a91f6 | 768 | if (disabled_cpus > 0) |
420f8f68 | 769 | additional_cpus = disabled_cpus; |
f62a91f6 AK |
770 | else |
771 | additional_cpus = 0; | |
420f8f68 AK |
772 | } |
773 | possible = num_processors + additional_cpus; | |
774 | if (possible > NR_CPUS) | |
775 | possible = NR_CPUS; | |
776 | ||
777 | printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", | |
778 | possible, | |
779 | max_t(int, possible - num_processors, 0)); | |
780 | ||
781 | for (i = 0; i < possible; i++) | |
61b1b2d0 AK |
782 | cpu_set(i, cpu_possible_map); |
783 | } | |
784 | #endif | |
785 | ||
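A worked example of the sizing logic in prefill_possible_map(): with 2 enabled and 2 BIOS-disabled CPUs, the possible map defaults to 4 entries; booting with additional_cpus=6 would ask for 8, clamped to NR_CPUS. The standalone sketch below reproduces just that arithmetic; NR_CPUS and the sample counts are assumptions for illustration.

```c
/*
 * Standalone sketch of the sizing logic in prefill_possible_map().
 * NR_CPUS and the sample num_processors/disabled_cpus values are
 * assumptions for illustration only.
 */
#include <stdio.h>

#define NR_CPUS 8

static int size_possible_map(int num_processors, int disabled_cpus,
			     int additional_cpus)
{
	int possible;

	if (additional_cpus == -1)
		additional_cpus = disabled_cpus > 0 ? disabled_cpus : 0;

	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;
	return possible;
}

int main(void)
{
	/* 2 CPUs enabled, 2 marked disabled in the MP/ACPI tables */
	printf("default:  %d possible CPUs\n", size_possible_map(2, 2, -1));
	/* booted with additional_cpus=6 */
	printf("override: %d possible CPUs\n", size_possible_map(2, 2, 6));
	return 0;
}
```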
a8ab26fe AK |
786 | /* |
787 | * Various sanity checks. | |
788 | */ | |
e6982c67 | 789 | static int __init smp_sanity_check(unsigned max_cpus) |
a8ab26fe | 790 | { |
1da177e4 LT |
791 | if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { |
792 | printk("weird, boot CPU (#%d) not listed by the BIOS.\n", | |
793 | hard_smp_processor_id()); | |
794 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); | |
795 | } | |
796 | ||
797 | /* | |
798 | * If we couldn't find an SMP configuration at boot time, | |
799 | * get out of here now! | |
800 | */ | |
801 | if (!smp_found_config) { | |
802 | printk(KERN_NOTICE "SMP motherboard not detected.\n"); | |
a8ab26fe | 803 | disable_smp(); |
1da177e4 LT |
804 | if (APIC_init_uniprocessor()) |
805 | printk(KERN_NOTICE "Local APIC not detected." | |
806 | " Using dummy APIC emulation.\n"); | |
a8ab26fe | 807 | return -1; |
1da177e4 LT |
808 | } |
809 | ||
810 | /* | |
811 | * Should not be necessary because the MP table should list the boot | |
812 | * CPU too, but we do it for the sake of robustness anyway. | |
813 | */ | |
814 | if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) { | |
815 | printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n", | |
816 | boot_cpu_id); | |
817 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); | |
818 | } | |
819 | ||
820 | /* | |
821 | * If we couldn't find a local APIC, then get out of here now! | |
822 | */ | |
11a8e778 | 823 | if (!cpu_has_apic) { |
1da177e4 LT |
824 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", |
825 | boot_cpu_id); | |
826 | printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); | |
a8ab26fe AK |
827 | nr_ioapics = 0; |
828 | return -1; | |
1da177e4 LT |
829 | } |
830 | ||
1da177e4 LT |
831 | /* |
832 | * If SMP should be disabled, then really disable it! | |
833 | */ | |
834 | if (!max_cpus) { | |
1da177e4 | 835 | printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); |
a8ab26fe AK |
836 | nr_ioapics = 0; |
837 | return -1; | |
1da177e4 LT |
838 | } |
839 | ||
a8ab26fe AK |
840 | return 0; |
841 | } | |
1da177e4 | 842 | |
71fff5e6 MT |
843 | /* |
844 | * Copy apicid's found by MP_processor_info from initial array to the per cpu | |
845 | * data area. The x86_cpu_to_apicid_init array is then expendable and the | |
846 | * x86_cpu_to_apicid_ptr is zeroed indicating that the static array is no | |
847 | * longer available. | |
848 | */ | |
849 | void __init smp_set_apicids(void) | |
850 | { | |
851 | int cpu; | |
852 | ||
853 | for_each_cpu_mask(cpu, cpu_possible_map) { | |
854 | if (per_cpu_offset(cpu)) | |
855 | per_cpu(x86_cpu_to_apicid, cpu) = | |
856 | x86_cpu_to_apicid_init[cpu]; | |
857 | } | |
858 | ||
859 | /* indicate the static array will be going away soon */ | |
860 | x86_cpu_to_apicid_ptr = NULL; | |
861 | } | |
862 | ||
a8ab26fe AK |
863 | /* |
864 | * Prepare for SMP bootup. The MP table or ACPI has been read | |
865 | * earlier. Just do some sanity checking here and enable APIC mode. | |
866 | */ | |
e6982c67 | 867 | void __init smp_prepare_cpus(unsigned int max_cpus) |
a8ab26fe | 868 | { |
a8ab26fe AK |
869 | nmi_watchdog_default(); |
870 | current_cpu_data = boot_cpu_data; | |
871 | current_thread_info()->cpu = 0; /* needed? */ | |
71fff5e6 | 872 | smp_set_apicids(); |
94605eff | 873 | set_cpu_sibling_map(0); |
1da177e4 | 874 | |
a8ab26fe AK |
875 | if (smp_sanity_check(max_cpus) < 0) { |
876 | printk(KERN_INFO "SMP disabled\n"); | |
877 | disable_smp(); | |
878 | return; | |
1da177e4 LT |
879 | } |
880 | ||
a8ab26fe | 881 | |
1da177e4 | 882 | /* |
a8ab26fe | 883 | * Switch from PIC to APIC mode. |
1da177e4 | 884 | */ |
a8ab26fe | 885 | setup_local_APIC(); |
1da177e4 | 886 | |
a8ab26fe AK |
887 | if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) { |
888 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", | |
889 | GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id); | |
890 | /* Or can we switch back to PIC here? */ | |
1da177e4 | 891 | } |
1da177e4 LT |
892 | |
893 | /* | |
a8ab26fe | 894 | * Now start the IO-APICs |
1da177e4 LT |
895 | */ |
896 | if (!skip_ioapic_setup && nr_ioapics) | |
897 | setup_IO_APIC(); | |
898 | else | |
899 | nr_ioapics = 0; | |
900 | ||
1da177e4 | 901 | /* |
a8ab26fe | 902 | * Set up local APIC timer on boot CPU. |
1da177e4 | 903 | */ |
1da177e4 | 904 | |
a8ab26fe | 905 | setup_boot_APIC_clock(); |
1da177e4 LT |
906 | } |
907 | ||
a8ab26fe AK |
908 | /* |
909 | * Early setup to make printk work. | |
910 | */ | |
911 | void __init smp_prepare_boot_cpu(void) | |
1da177e4 | 912 | { |
a8ab26fe AK |
913 | int me = smp_processor_id(); |
914 | cpu_set(me, cpu_online_map); | |
915 | cpu_set(me, cpu_callout_map); | |
884d9e40 | 916 | per_cpu(cpu_state, me) = CPU_ONLINE; |
1da177e4 LT |
917 | } |
918 | ||
a8ab26fe AK |
919 | /* |
920 | * Entry point to boot a CPU. | |
a8ab26fe AK |
921 | */ |
922 | int __cpuinit __cpu_up(unsigned int cpu) | |
1da177e4 | 923 | { |
a8ab26fe | 924 | int apicid = cpu_present_to_apicid(cpu); |
d04f41e3 IM |
925 | unsigned long flags; |
926 | int err; | |
1da177e4 | 927 | |
a8ab26fe | 928 | WARN_ON(irqs_disabled()); |
1da177e4 | 929 | |
a8ab26fe AK |
930 | Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu); |
931 | ||
932 | if (apicid == BAD_APICID || apicid == boot_cpu_id || | |
933 | !physid_isset(apicid, phys_cpu_present_map)) { | |
934 | printk("__cpu_up: bad cpu %d\n", cpu); | |
935 | return -EINVAL; | |
936 | } | |
a8ab26fe | 937 | |
76e4f660 AR |
938 | /* |
939 | * Already booted CPU? | |
940 | */ | |
941 | if (cpu_isset(cpu, cpu_callin_map)) { | |
942 | Dprintk("do_boot_cpu %d Already started\n", cpu); | |
943 | return -ENOSYS; | |
944 | } | |
945 | ||
2b1f6278 BK |
946 | /* |
947 | * Save current MTRR state in case it was changed since early boot | |
948 | * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: | |
949 | */ | |
950 | mtrr_save_state(); | |
951 | ||
884d9e40 | 952 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; |
a8ab26fe AK |
953 | /* Boot it! */ |
954 | err = do_boot_cpu(cpu, apicid); | |
955 | if (err < 0) { | |
a8ab26fe AK |
956 | Dprintk("do_boot_cpu failed %d\n", err); |
957 | return err; | |
1da177e4 | 958 | } |
a8ab26fe | 959 | |
1da177e4 LT |
960 | /* Unleash the CPU! */ |
961 | Dprintk("waiting for cpu %d\n", cpu); | |
962 | ||
95492e46 IM |
963 | /* |
964 | * Make sure and check TSC sync: | |
965 | */ | |
d04f41e3 | 966 | local_irq_save(flags); |
95492e46 | 967 | check_tsc_sync_source(cpu); |
d04f41e3 | 968 | local_irq_restore(flags); |
95492e46 | 969 | |
1da177e4 | 970 | while (!cpu_isset(cpu, cpu_online_map)) |
a8ab26fe | 971 | cpu_relax(); |
76e4f660 AR |
972 | err = 0; |
973 | ||
974 | return err; | |
1da177e4 LT |
975 | } |
976 | ||
a8ab26fe AK |
977 | /* |
978 | * Finish the SMP boot. | |
979 | */ | |
e6982c67 | 980 | void __init smp_cpus_done(unsigned int max_cpus) |
1da177e4 | 981 | { |
a8ab26fe | 982 | smp_cleanup_boot(); |
1da177e4 | 983 | setup_ioapic_dest(); |
75152114 | 984 | check_nmi_watchdog(); |
a8ab26fe | 985 | } |
76e4f660 AR |
986 | |
987 | #ifdef CONFIG_HOTPLUG_CPU | |
988 | ||
cb0cd8d4 | 989 | static void remove_siblinginfo(int cpu) |
76e4f660 AR |
990 | { |
991 | int sibling; | |
94605eff | 992 | struct cpuinfo_x86 *c = cpu_data; |
76e4f660 | 993 | |
08357611 MT |
994 | for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { |
995 | cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); | |
94605eff SS |
996 | /* |
997 | * last thread sibling in this cpu core going down | |
998 | */ | |
d5a7430d | 999 | if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) |
94605eff SS |
1000 | c[sibling].booted_cores--; |
1001 | } | |
1002 | ||
d5a7430d MT |
1003 | for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) |
1004 | cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); | |
1005 | cpus_clear(per_cpu(cpu_sibling_map, cpu)); | |
08357611 | 1006 | cpus_clear(per_cpu(cpu_core_map, cpu)); |
f3fa8ebc RS |
1007 | c[cpu].phys_proc_id = 0; |
1008 | c[cpu].cpu_core_id = 0; | |
94605eff | 1009 | cpu_clear(cpu, cpu_sibling_setup_map); |
76e4f660 AR |
1010 | } |
1011 | ||
1012 | void remove_cpu_from_maps(void) | |
1013 | { | |
1014 | int cpu = smp_processor_id(); | |
1015 | ||
1016 | cpu_clear(cpu, cpu_callout_map); | |
1017 | cpu_clear(cpu, cpu_callin_map); | |
1018 | clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */ | |
488fc08d | 1019 | clear_node_cpumask(cpu); |
76e4f660 AR |
1020 | } |
1021 | ||
1022 | int __cpu_disable(void) | |
1023 | { | |
1024 | int cpu = smp_processor_id(); | |
1025 | ||
1026 | /* | |
1027 | * Perhaps use cpufreq to drop frequency, but that could go | |
1028 | * into generic code. | |
1029 | * | |
1030 | * We won't take down the boot processor on i386 due to some | |
1031 | * interrupts only being able to be serviced by the BSP. | |
1032 | * Especially so if we're not using an IOAPIC -zwane | |
1033 | */ | |
1034 | if (cpu == 0) | |
1035 | return -EBUSY; | |
1036 | ||
4038f901 SL |
1037 | if (nmi_watchdog == NMI_LOCAL_APIC) |
1038 | stop_apic_nmi_watchdog(NULL); | |
5e9ef02e | 1039 | clear_local_APIC(); |
76e4f660 AR |
1040 | |
1041 | /* | |
1042 | * HACK: | |
1043 | * Allow any queued timer interrupts to get serviced. | |
1044 | * This is only a temporary solution until we clean up | |
1045 | * fixup_irqs as we do for IA64. | |
1046 | */ | |
1047 | local_irq_enable(); | |
1048 | mdelay(1); | |
1049 | ||
1050 | local_irq_disable(); | |
1051 | remove_siblinginfo(cpu); | |
1052 | ||
70a0a535 | 1053 | spin_lock(&vector_lock); |
76e4f660 AR |
1054 | /* It's now safe to remove this processor from the online map */ |
1055 | cpu_clear(cpu, cpu_online_map); | |
70a0a535 | 1056 | spin_unlock(&vector_lock); |
76e4f660 AR |
1057 | remove_cpu_from_maps(); |
1058 | fixup_irqs(cpu_online_map); | |
1059 | return 0; | |
1060 | } | |
1061 | ||
1062 | void __cpu_die(unsigned int cpu) | |
1063 | { | |
1064 | /* We don't do anything here: idle task is faking death itself. */ | |
1065 | unsigned int i; | |
1066 | ||
1067 | for (i = 0; i < 10; i++) { | |
1068 | /* They ack this in play_dead by setting CPU_DEAD */ | |
884d9e40 AR |
1069 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { |
1070 | printk ("CPU %d is now offline\n", cpu); | |
d167a518 GH |
1071 | if (1 == num_online_cpus()) |
1072 | alternatives_smp_switch(0); | |
76e4f660 | 1073 | return; |
884d9e40 | 1074 | } |
ef6e5253 | 1075 | msleep(100); |
76e4f660 AR |
1076 | } |
1077 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); | |
1078 | } | |
1079 | ||
2c8c0e6b | 1080 | static __init int setup_additional_cpus(char *s) |
420f8f68 | 1081 | { |
2c8c0e6b | 1082 | return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL; |
420f8f68 | 1083 | } |
2c8c0e6b | 1084 | early_param("additional_cpus", setup_additional_cpus); |
420f8f68 | 1085 | |
76e4f660 AR |
1086 | #else /* ... !CONFIG_HOTPLUG_CPU */ |
1087 | ||
1088 | int __cpu_disable(void) | |
1089 | { | |
1090 | return -ENOSYS; | |
1091 | } | |
1092 | ||
1093 | void __cpu_die(unsigned int cpu) | |
1094 | { | |
1095 | /* We said "no" in __cpu_disable */ | |
1096 | BUG(); | |
1097 | } | |
1098 | #endif /* CONFIG_HOTPLUG_CPU */ |