/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 * Copyright 2001 Andi Kleen, SuSE Labs.
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * This code is released under the GNU General Public License version 2
 *
 * Fixes
 *    Felix Koop        : NR_CPUS used properly
 *    Jose Renau        : Handle single CPU case.
 *    Alan Cox          : By repeated request 8) - Total BogoMIP report.
 *    Greg Wright       : Fix for kernel stacks panic.
 *    Erich Boleyn      : MP v1.4 and additional changes.
 *    Matthias Sattler  : Changes for 2.1 kernel map.
 *    Michel Lespinasse : Changes for 2.1 kernel map.
 *    Michael Chastain  : Change trampoline.S to gnu as.
 *    Alan Cox          : Dumb bug: 'B' step PPro's are fine
 *    Ingo Molnar       : Added APIC timers, based on code
 *                        from Jose Renau
 *    Ingo Molnar       : various cleanups and rewrites
 *    Tigran Aivazian   : fixed "0.00 in /proc/uptime on SMP" bug.
 *    Maciej W. Rozycki : Bits for genuine 82489DX APICs
 *    Andi Kleen        : Changed for SMP boot into long mode.
 *    Rusty Russell     : Hacked into shape for new "hotplug" boot process.
 *    Andi Kleen        : Converted to new state machine.
 *                        Various cleanups.
 *                        Probably mostly hotplug CPU ready now.
 *    Ashok Raj         : CPU hotplug support
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/smp.h>
#include <linux/kdebug.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/numa.h>

/* Set when the idlers are all forked */
int smp_threads_ready;

/*
 * Trampoline 80x86 program as an array.
 */

extern const unsigned char trampoline_data[];
extern const unsigned char trampoline_end[];

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Store all idle threads; these can be reused instead of creating
 * a new thread.  Also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)     (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)  (per_cpu(idle_thread_array, x) = (p))
#else
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)     (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)  (idle_thread_array[(x)] = (p))
#endif


/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __cpuinit setup_trampoline(void)
{
        void *tramp = __va(SMP_TRAMPOLINE_BASE);
        memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
        return virt_to_phys(tramp);
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

static void __cpuinit smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data(id);

        *c = boot_cpu_data;
        c->cpu_index = id;
        identify_cpu(c);
        print_cpu_info(c);
}

static atomic_t init_deasserted __cpuinitdata;

/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
        int cpuid, phys_id;
        unsigned long timeout;

        /*
         * If woken up by an INIT in an 82489DX configuration
         * we may get here before an INIT-deassert IPI reaches
         * our local APIC.  We have to wait for the IPI or we'll
         * lock up on an APIC access.
         */
        while (!atomic_read(&init_deasserted))
                cpu_relax();

        /*
         * (This works even if the APIC is not enabled.)
         */
        phys_id = GET_APIC_ID(apic_read(APIC_ID));
        cpuid = smp_processor_id();
        if (cpu_isset(cpuid, cpu_callin_map)) {
                panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
                        phys_id, cpuid);
        }
        Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

        /*
         * STARTUP IPIs are fragile beasts as they might sometimes
         * trigger some glue motherboard logic.  Complete APIC bus
         * silence for 1 second; this overestimates the time the
         * boot CPU is spending to send the up to 2 STARTUP IPIs
         * by a factor of two.  This should be enough.
         */

        /*
         * Waiting 2s total for startup (udelay is not yet working)
         */
        timeout = jiffies + 2*HZ;
        while (time_before(jiffies, timeout)) {
                /*
                 * Has the boot CPU finished its STARTUP sequence?
                 */
                if (cpu_isset(cpuid, cpu_callout_map))
                        break;
                cpu_relax();
        }

        if (!time_before(jiffies, timeout)) {
                panic("smp_callin: CPU%d started up but did not get a callout!\n",
                        cpuid);
        }

        /*
         * The boot CPU has finished the init stage and is spinning
         * on callin_map until we finish.  We are free to set up this
         * CPU, first the APIC.  (This is probably redundant on most
         * boards.)
         */

        Dprintk("CALLIN, before setup_local_APIC().\n");
        setup_local_APIC();
        end_local_APIC_setup();

        /*
         * Get our bogomips.
         *
         * Need to enable IRQs because it can take longer and then
         * the NMI watchdog might kill us.
         */
        local_irq_enable();
        calibrate_delay();
        local_irq_disable();
        Dprintk("Stack at about %p\n", &cpuid);

        /*
         * Save our processor parameters
         */
        smp_store_cpu_info(cpuid);

        /*
         * Allow the master to continue.
         */
        cpu_set(cpuid, cpu_callin_map);
}

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        /*
         * For perf, we return the last level cache shared map.
         * And for power savings, we return cpu_core_map.
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
                return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
}

/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
        /*
         * Don't put anything before smp_callin(); SMP booting is so
         * fragile that we want to limit the things done here to the
         * most necessary things.
         */
        cpu_init();
        preempt_disable();
        smp_callin();

        /* otherwise gcc will move up the smp_processor_id before the cpu_init */
        barrier();

        /*
         * Check TSC sync first:
         */
        check_tsc_sync_target();

        if (nmi_watchdog == NMI_IO_APIC) {
                disable_8259A_irq(0);
                enable_NMI_through_LVT0();
                enable_8259A_irq(0);
        }

        /*
         * The sibling maps must be set before turning on the online map
         * for this cpu.
         */
        set_cpu_sibling_map(smp_processor_id());

        /*
         * We need to hold call_lock, so there is no inconsistency
         * between the time smp_call_function() determines the number of
         * IPI recipients, and the time when the determination is made
         * for which cpus receive the IPI in genapic_flat.c.  Holding this
         * lock helps us to not include this cpu in a currently in progress
         * smp_call_function().
         */
        lock_ipi_call_lock();
        spin_lock(&vector_lock);

        /* Setup the per cpu irq handling data structures */
        __setup_vector_irq(smp_processor_id());
        /*
         * Allow the master to continue.
         */
        cpu_set(smp_processor_id(), cpu_online_map);
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        spin_unlock(&vector_lock);

        unlock_ipi_call_lock();

        setup_secondary_clock();

        cpu_idle();
}

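/*
 * Hand-off values read by the early assembly startup path: the freshly
 * booted AP uses init_rsp as its initial stack and jumps to *initial_code
 * once it has left the trampoline.
 */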
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);

#ifdef APIC_DEBUG
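/*
 * Dump a few registers of the target CPU's local APIC using the APIC
 * remote-read delivery mode (APIC_DM_REMRD); handy when a CPU fails to
 * come up.
 */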
static void inquire_remote_apic(int apicid)
{
        unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        char *names[] = { "ID", "VERSION", "SPIV" };
        int timeout;
        u32 status;

        printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);

                /*
                 * Wait for idle.
                 */
                status = safe_apic_wait_icr_idle();
                if (status)
                        printk(KERN_CONT
                               "a previous APIC delivery may have failed\n");

                apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
                apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);

                timeout = 0;
                do {
                        udelay(100);
                        status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
                } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

                switch (status) {
                case APIC_ICR_RR_VALID:
                        status = apic_read(APIC_RRR);
                        printk(KERN_CONT "%08x\n", status);
                        break;
                default:
                        printk(KERN_CONT "failed\n");
                }
        }
}
#endif

/*
 * Kick the secondary to wake up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
        unsigned long send_status, accept_status = 0;
        int maxlvt, num_starts, j;

        Dprintk("Asserting INIT.\n");

        /*
         * Turn INIT on target chip
         */
        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /*
         * Send IPI
         */
        apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
                                | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        mdelay(10);

        Dprintk("Deasserting INIT.\n");

        /* Target chip */
        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /* Send IPI */
        apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        mb();
        atomic_set(&init_deasserted, 1);

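        /* The MP spec start-up algorithm sends up to two STARTUP IPIs. */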
        num_starts = 2;

        /*
         * Run STARTUP IPI loop.
         */
        Dprintk("#startup loops: %d.\n", num_starts);

        maxlvt = lapic_get_maxlvt();

        for (j = 1; j <= num_starts; j++) {
                Dprintk("Sending STARTUP #%d.\n", j);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
                Dprintk("After apic_write.\n");

                /*
                 * STARTUP IPI
                 */

                /* Target chip */
                apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

                /* Boot on the stack */
                /* Kick the secondary */
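                /*
                 * The STARTUP IPI vector field carries the 4 KB page number
                 * of the real-mode entry point, so start_rip must be
                 * page-aligned and below 1 MB.
                 */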
                apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(300);

                Dprintk("Startup point 1.\n");

                Dprintk("Waiting for send to finish...\n");
                send_status = safe_apic_wait_icr_idle();

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(200);
                /*
                 * Due to the Pentium erratum 3AP.
                 */
                if (maxlvt > 3) {
                        apic_write(APIC_ESR, 0);
                }
                accept_status = (apic_read(APIC_ESR) & 0xEF);
                if (send_status || accept_status)
                        break;
        }
        Dprintk("After Startup.\n");

        if (send_status)
                printk(KERN_ERR "APIC never delivered???\n");
        if (accept_status)
                printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}

struct create_idle {
        struct work_struct work;
        struct task_struct *idle;
        struct completion done;
        int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
        struct create_idle *c_idle =
                container_of(work, struct create_idle, work);

        c_idle->idle = fork_idle(c_idle->cpu);
        complete(&c_idle->done);
}

/*
 * Boot one CPU.
 */
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
        unsigned long boot_error;
        int timeout;
        unsigned long start_rip;
        struct create_idle c_idle = {
                .cpu = cpu,
                .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
        INIT_WORK(&c_idle.work, do_fork_idle);

        /* allocate memory for gdts of secondary cpus. Hotplug is considered */
        if (!cpu_gdt_descr[cpu].address &&
                !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
                printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
                return -1;
        }

        /* Allocate node local memory for AP pdas */
        if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
                struct x8664_pda *newpda, *pda;
                int node = cpu_to_node(cpu);
                pda = cpu_pda(cpu);
                newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC,
                                      node);
                if (newpda) {
                        memcpy(newpda, pda, sizeof(struct x8664_pda));
                        cpu_pda(cpu) = newpda;
                } else
                        printk(KERN_ERR
                "Could not allocate node local PDA for CPU %d on node %d\n",
                                cpu, node);
        }

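        /*
         * Going SMP: switch the alternative instructions to their SMP
         * variants (e.g. put LOCK prefixes back in place).
         */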
        alternatives_smp_switch(1);

        c_idle.idle = get_idle_for_cpu(cpu);

        if (c_idle.idle) {
                c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
                        (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
                init_idle(c_idle.idle, cpu);
                goto do_rest;
        }

        /*
         * During the cold boot process, the keventd thread is not spun up
         * yet.  When we do cpu hot-add, we create idle threads on the fly,
         * and we should not acquire any attributes from the calling context.
         * Hence the clean way to create kernel threads is to do that from
         * keventd().  We do the current_is_keventd() check because the ACPI
         * notifier was also queuing to keventd(), and when the caller is
         * already running in the context of keventd(), we would end up
         * locking up the keventd thread.
         */
        if (!keventd_up() || current_is_keventd())
                c_idle.work.func(&c_idle.work);
        else {
                schedule_work(&c_idle.work);
                wait_for_completion(&c_idle.done);
        }

        if (IS_ERR(c_idle.idle)) {
                printk("failed fork for CPU %d\n", cpu);
                return PTR_ERR(c_idle.idle);
        }

        set_idle_for_cpu(cpu, c_idle.idle);

do_rest:

        cpu_pda(cpu)->pcurrent = c_idle.idle;

        start_rip = setup_trampoline();

        init_rsp = c_idle.idle->thread.sp;
        load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
        initial_code = start_secondary;
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);

        printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
                cpus_weight(cpu_present_map),
                apicid);

        /*
         * This grunge runs the startup process for
         * the targeted processor.
         */

        atomic_set(&init_deasserted, 0);

        Dprintk("Setting warm reset code and vector.\n");

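        /*
         * Point the BIOS warm-reset path at the trampoline: the CMOS shutdown
         * status byte (register 0xF) is set to 0xA ("jump via the 40:67 reset
         * vector"), and the vector at 0x467 (offset) / 0x469 (segment) is made
         * to reference start_rip.
         */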
        CMOS_WRITE(0xa, 0xf);
        local_flush_tlb();
        Dprintk("1.\n");
        *((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
        Dprintk("2.\n");
        *((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
        Dprintk("3.\n");

        /*
         * Be paranoid about clearing APIC errors.
         */
        apic_write(APIC_ESR, 0);
        apic_read(APIC_ESR);

        /*
         * Status is now clean
         */
        boot_error = 0;

        /*
         * Starting actual IPI sequence...
         */
        boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

        if (!boot_error) {
                /*
                 * allow APs to start initializing.
                 */
                Dprintk("Before Callout %d.\n", cpu);
                cpu_set(cpu, cpu_callout_map);
                Dprintk("After Callout %d.\n", cpu);

                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
                        if (cpu_isset(cpu, cpu_callin_map))
                                break;  /* It has booted */
                        udelay(100);
                }

                if (cpu_isset(cpu, cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("CPU has booted.\n");
                } else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
                                        == 0xA5)
                                /* trampoline started but...? */
                                printk("Stuck ??\n");
                        else
                                /* trampoline code not run */
                                printk("Not responding.\n");
#ifdef APIC_DEBUG
                        inquire_remote_apic(apicid);
#endif
                }
        }
        if (boot_error) {
                cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
                clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
                clear_node_cpumask(cpu); /* was set by numa_add_cpu */
                cpu_clear(cpu, cpu_present_map);
                cpu_clear(cpu, cpu_possible_map);
                per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
                return -EIO;
        }

        return 0;
}

cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
        /*
         * Paranoid: Set warm reset code and vector here back
         * to default values.
         */
        CMOS_WRITE(0, 0xf);

        /*
         * Reset trampoline flag
         */
        *((volatile int *) phys_to_virt(0x467)) = 0;
}

/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
        cpu_present_map = cpumask_of_cpu(0);
        cpu_possible_map = cpumask_of_cpu(0);
        if (smp_found_config)
                phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
        else
                phys_cpu_present_map = physid_mask_of_physid(0);
        cpu_set(0, per_cpu(cpu_sibling_map, 0));
        cpu_set(0, per_cpu(cpu_core_map, 0));
}

/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
        if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
                printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
                        hard_smp_processor_id());
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }

        /*
         * If we couldn't find an SMP configuration at boot time,
         * get out of here now!
         */
        if (!smp_found_config) {
                printk(KERN_NOTICE "SMP motherboard not detected.\n");
                disable_smp();
                if (APIC_init_uniprocessor())
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
                return -1;
        }

        /*
         * Should not be necessary because the MP table should list the boot
         * CPU too, but we do it for the sake of robustness anyway.
         */
        if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
                printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
                        boot_cpu_id);
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }

        /*
         * If we couldn't find a local APIC, then get out of here now!
         */
        if (!cpu_has_apic) {
                printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
                        boot_cpu_id);
                printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
                nr_ioapics = 0;
                return -1;
        }

        /*
         * If SMP should be disabled, then really disable it!
         */
        if (!max_cpus) {
                printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
                nr_ioapics = 0;
                return -1;
        }

        return 0;
}

static void __init smp_cpu_index_default(void)
{
        int i;
        struct cpuinfo_x86 *c;

        for_each_cpu_mask(i, cpu_possible_map) {
                c = &cpu_data(i);
                /* mark all to hotplug */
                c->cpu_index = NR_CPUS;
        }
}

/*
 * Prepare for SMP bootup.  The MP table or ACPI has been read
 * earlier.  Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
        nmi_watchdog_default();
        smp_cpu_index_default();
        current_cpu_data = boot_cpu_data;
        current_thread_info()->cpu = 0;  /* needed? */
        set_cpu_sibling_map(0);

        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
                disable_smp();
                return;
        }

        /*
         * Switch from PIC to APIC mode.
         */
        setup_local_APIC();

        /*
         * Enable IO APIC before setting up error vector
         */
        if (!skip_ioapic_setup && nr_ioapics)
                enable_IO_APIC();
        end_local_APIC_setup();

        if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
                panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
                      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
                /* Or can we switch back to PIC here? */
        }

        /*
         * Now start the IO-APICs
         */
        if (!skip_ioapic_setup && nr_ioapics)
                setup_IO_APIC();
        else
                nr_ioapics = 0;

        /*
         * Set up local APIC timer on boot CPU.
         */

        setup_boot_clock();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
        int me = smp_processor_id();
        /* already set me in cpu_online_map in boot_cpu_init() */
        cpu_set(me, cpu_callout_map);
        per_cpu(cpu_state, me) = CPU_ONLINE;
}

/*
 * Entry point to boot a CPU.
 */
int __cpuinit native_cpu_up(unsigned int cpu)
{
        int apicid = cpu_present_to_apicid(cpu);
        unsigned long flags;
        int err;

        WARN_ON(irqs_disabled());

        Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

        if (apicid == BAD_APICID || apicid == boot_cpu_id ||
            !physid_isset(apicid, phys_cpu_present_map)) {
                printk("__cpu_up: bad cpu %d\n", cpu);
                return -EINVAL;
        }

        /*
         * Already booted CPU?
         */
        if (cpu_isset(cpu, cpu_callin_map)) {
                Dprintk("do_boot_cpu %d Already started\n", cpu);
                return -ENOSYS;
        }

        /*
         * Save current MTRR state in case it was changed since early boot
         * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
         */
        mtrr_save_state();

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        /* Boot it! */
        err = do_boot_cpu(cpu, apicid);
        if (err < 0) {
                Dprintk("do_boot_cpu failed %d\n", err);
                return err;
        }

        /* Unleash the CPU! */
        Dprintk("waiting for cpu %d\n", cpu);

        /*
         * Make sure and check TSC sync:
         */
        local_irq_save(flags);
        check_tsc_sync_source(cpu);
        local_irq_restore(flags);

        while (!cpu_isset(cpu, cpu_online_map))
                cpu_relax();
        err = 0;

        return err;
}

/*
 * Finish the SMP boot.
 */
void __init native_smp_cpus_done(unsigned int max_cpus)
{
        smp_cleanup_boot();
        setup_ioapic_dest();
        check_nmi_watchdog();
}

#ifdef CONFIG_HOTPLUG_CPU
static void __ref remove_cpu_from_maps(void)
{
        int cpu = smp_processor_id();

        cpu_clear(cpu, cpu_callout_map);
        cpu_clear(cpu, cpu_callin_map);
        clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
        clear_node_cpumask(cpu);
}

int __cpu_disable(void)
{
        int cpu = smp_processor_id();

        /*
         * Perhaps use cpufreq to drop frequency, but that could go
         * into generic code.
         *
         * We won't take down the boot processor on i386 due to some
         * interrupts only being able to be serviced by the BSP.
         * Especially so if we're not using an IOAPIC  -zwane
         */
        if (cpu == 0)
                return -EBUSY;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                stop_apic_nmi_watchdog(NULL);
        clear_local_APIC();

        /*
         * HACK:
         * Allow any queued timer interrupts to get serviced.
         * This is only a temporary solution until we clean up
         * fixup_irqs as we do for IA64.
         */
        local_irq_enable();
        mdelay(1);

        local_irq_disable();
        remove_siblinginfo(cpu);

        spin_lock(&vector_lock);
        /* It's now safe to remove this processor from the online map */
        cpu_clear(cpu, cpu_online_map);
        spin_unlock(&vector_lock);
        remove_cpu_from_maps();
        fixup_irqs(cpu_online_map);
        return 0;
}

void __cpu_die(unsigned int cpu)
{
        /* We don't do anything here: idle task is faking death itself. */
        unsigned int i;

        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        printk("CPU %d is now offline\n", cpu);
                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}

#else /* ... !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */