/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *		Matthias Sattler	:	Changes for 2.1 kernel map.
 *		Michel Lespinasse	:	Changes for 2.1 kernel map.
 *		Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *		Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *		Martin J. Bligh	:	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>
#include <asm/nmi.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>

/* Set if we find a B stepping CPU */
static int __devinitdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
#ifdef CONFIG_X86_HT
EXPORT_SYMBOL(smp_num_siblings);
#endif

/* Package ID of each logical CPU */
int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};

/* Core ID of each logical CPU */
int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};

/* Last level cache ID of each logical CPU */
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};

/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;

/* TSC's upper 32 bits can't be written in earlier CPUs (before Prescott);
 * there is no way to resync one AP against the BP. TBD: for Prescott and
 * above, we should use IA64's algorithm
 */
static int __devinitdata tsc_sync_disabled;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);

u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
			{ [0 ... NR_CPUS-1] = 0xff };
EXPORT_SYMBOL(x86_cpu_to_apicid);

/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data [];
extern unsigned char trampoline_end  [];
static unsigned char *trampoline_base;
static int trampoline_exec;

static void map_cpu_to_logical_apicid(void);

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __devinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}

/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
	/*
	 * Make the SMP trampoline executable:
	 */
	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
}
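
/*
 * A note on the 0x9F000 limit checked above: an AP comes out of the
 * STARTUP IPI in real mode, fetching its first instructions from the
 * 4K page named by the IPI vector, so the trampoline has to live in
 * identity-mapped memory below 640K; stopping at 0x9F000 presumably
 * also keeps clear of an EBDA sitting at the top of conventional memory.
 */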

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */

static void __devinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	if (id != 0)
		identify_cpu(c);
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) || (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have the MP
		 * capability bit. It's worth noting that the A5 stepping
		 * (662) of some Athlon XPs has the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		    (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, it's not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
}

/*
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSC's synchronized,
 * then we print a warning if not, and always resync.
 */

static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
static unsigned long long tsc_values[NR_CPUS];

#define NR_LOOPS 5

static void __init synchronize_tsc_bp(void)
{
	int i;
	unsigned long long t0;
	unsigned long long sum, avg;
	long long delta;
	unsigned int one_usec;
	int buggy = 0;

	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());

	/* convert from kcyc/sec to cyc/usec */
	one_usec = cpu_khz / 1000;

	atomic_set(&tsc_start_flag, 1);
	wmb();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronized and
	 * the BP and APs set their cycle counters to zero all at
	 * once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */
	for (i = 0; i < NR_LOOPS; i++) {
		/*
		 * all APs synchronize but they loop on '== num_cpus'
		 */
		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
			cpu_relax();
		atomic_set(&tsc_count_stop, 0);
		wmb();
		/*
		 * this lets the APs save their current TSC:
		 */
		atomic_inc(&tsc_count_start);

		rdtscll(tsc_values[smp_processor_id()]);
		/*
		 * We clear the TSC in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		/*
		 * Wait for all APs to leave the synchronization point:
		 */
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
			cpu_relax();
		atomic_set(&tsc_count_start, 0);
		wmb();
		atomic_inc(&tsc_count_stop);
	}

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, cpu_callout_map)) {
			t0 = tsc_values[i];
			sum += t0;
		}
	}
	avg = sum;
	do_div(avg, num_booting_cpus());

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_isset(i, cpu_callout_map))
			continue;
		delta = tsc_values[i] - avg;
		if (delta < 0)
			delta = -delta;
		/*
		 * We report clock differences bigger than 2 microseconds.
		 */
		if (delta > 2*one_usec) {
			long realdelta;
			if (!buggy) {
				buggy = 1;
				printk("\n");
			}
			realdelta = delta;
			do_div(realdelta, one_usec);
			if (tsc_values[i] < avg)
				realdelta = -realdelta;

			if (realdelta > 0)
				printk(KERN_INFO "CPU#%d had %ld usecs TSC "
					"skew, fixed it up.\n", i, realdelta);
		}

		sum += delta;
	}
	if (!buggy)
		printk("passed.\n");
}

static void __init synchronize_tsc_ap(void)
{
	int i;

	/*
	 * Not every cpu is online at the time
	 * this gets called, so we first wait for the BP to
	 * finish SMP initialization:
	 */
	while (!atomic_read(&tsc_start_flag))
		cpu_relax();

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&tsc_count_start);
		while (atomic_read(&tsc_count_start) != num_booting_cpus())
			cpu_relax();

		rdtscll(tsc_values[smp_processor_id()]);
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		atomic_inc(&tsc_count_stop);
		while (atomic_read(&tsc_count_stop) != num_booting_cpus())
			cpu_relax();
	}
}
#undef NR_LOOPS
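
/*
 * How the two sides of the rendezvous pair up: on each pass every AP
 * increments tsc_count_start and spins until it reaches
 * num_booting_cpus(), i.e. until the BP (which waits for
 * num_booting_cpus()-1 and then adds the final increment) has arrived;
 * all CPUs then sample (or, on the last pass, zero) their TSCs at
 * nearly the same moment, and tsc_count_stop closes the loop the same
 * way. A worked example of the skew report: with cpu_khz == 2000000,
 * one_usec is 2000 cycles, so a CPU sampling 5000 cycles above the
 * average exceeds the 2*one_usec threshold and is reported as having
 * 2 usecs of TSC skew after the integer divide.
 */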

extern void calibrate_delay(void);

static atomic_t init_deasserted;

static void __devinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
		BUG();
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		rep_nop();
	}

	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU%d started up but did not get a callout!\n",
			cpuid);
		BUG();
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	/*
	 * Get our bogomips.
	 */
	calibrate_delay();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	disable_APIC_timer();

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);

	/*
	 * Synchronize the TSC with the BP
	 */
	if (cpu_has_tsc && cpu_khz && !tsc_sync_disabled)
		synchronize_tsc_ap();
}
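
/*
 * The callin/callout handshake in a nutshell: do_boot_cpu() sets the
 * AP's bit in cpu_callout_map to release it from the wait loop above,
 * and the AP answers by setting its own bit in cpu_callin_map once it
 * has finished initializing; each side gives up after a timeout (2s
 * here, 5s on the BP side) if the other never responds.
 */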

static int cpucount;

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = cpu_data + cpu;
	/*
	 * For perf, we return last level cache shared map.
	 * TBD: when power saving sched policy is added, we will return
	 *      cpu_core_map when power saving policy is enabled
	 */
	return c->llc_shared_map;
}

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void
set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = cpu_data;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (phys_proc_id[cpu] == phys_proc_id[i] &&
			    cpu_core_id[cpu] == cpu_core_id[i]) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
				cpu_set(i, cpu_core_map[cpu]);
				cpu_set(cpu, cpu_core_map[i]);
				cpu_set(i, c[cpu].llc_shared_map);
				cpu_set(cpu, c[i].llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, cpu_sibling_map[cpu]);
	}

	cpu_set(cpu, c[cpu].llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		cpu_core_map[cpu] = cpu_sibling_map[cpu];
		c[cpu].booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (cpu_llc_id[cpu] != BAD_APICID &&
		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
			cpu_set(i, c[cpu].llc_shared_map);
			cpu_set(cpu, c[i].llc_shared_map);
		}
		if (phys_proc_id[cpu] == phys_proc_id[i]) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(cpu_sibling_map[i]) == i)
					c[cpu].booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					c[i].booted_cores++;
			} else if (i != cpu && !c[cpu].booted_cores)
				c[cpu].booted_cores = c[i].booted_cores;
		}
	}
}
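
/*
 * To recap the masks built above: cpu_sibling_map[cpu] holds the HT
 * threads sharing this cpu's core (same package and core ID),
 * cpu_core_map[cpu] holds every thread in the same physical package,
 * and llc_shared_map links threads sharing a last-level cache (this is
 * what cpu_coregroup_map() hands to the scheduler). booted_cores counts
 * distinct cores per package by crediting only the first sibling seen
 * for each core.
 */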

/*
 * Activate a secondary processor.
 */
static void __devinit start_secondary(void *unused)
{
	/*
	 * Don't put anything before smp_callin(), SMP
	 * booting is so fragile that we want to limit the
	 * things done here to the most necessary things.
	 */
	cpu_init();
	preempt_disable();
	smp_callin();
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();
	setup_secondary_APIC_clock();
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}
	enable_APIC_timer();
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	/* This must be done before setting cpu_online_map */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();
	cpu_set(smp_processor_id(), cpu_online_map);
	unlock_ipi_call_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();
	cpu_idle();
}

/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure
 * This function must not return.
 */
void __devinit initialize_secondary(void)
{
	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.
	 */

	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"r" (current->thread.esp),"r" (current->thread.eip));
}
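
/*
 * Why it must not return: the movl switches %esp to the idle task's
 * stack, abandoning the frame this function was called on, and the jmp
 * lands at thread.eip, which do_boot_cpu() pointed at start_secondary();
 * there is no valid return address left to go back to.
 */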

extern struct {
	void * esp;
	unsigned short ss;
} stack_start;

#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
cpumask_t node_2_cpu_mask[MAX_NUMNODES] __read_mostly =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
/* which node each logical CPU is on */
int cpu_2_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);

/* set up a mapping between cpu and node. */
static inline void map_cpu_to_node(int cpu, int node)
{
	printk("Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_2_cpu_mask[node]);
	cpu_2_node[cpu] = node;
}

/* undo a mapping between cpu and node. */
static inline void unmap_cpu_to_node(int cpu)
{
	int node;

	printk("Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_2_cpu_mask[node]);
	cpu_2_node[cpu] = 0;
}
#else /* !CONFIG_NUMA */

#define map_cpu_to_node(cpu, node)	({})
#define unmap_cpu_to_node(cpu)	({})

#endif /* CONFIG_NUMA */

u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };

static void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, apicid_to_node(apicid));
}

static void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
657#if APIC_DEBUG
658static inline void __inquire_remote_apic(int apicid)
659{
660 int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
661 char *names[] = { "ID", "VERSION", "SPIV" };
662 int timeout, status;
663
664 printk("Inquiring remote APIC #%d...\n", apicid);
665
38e548ee 666 for (i = 0; i < ARRAY_SIZE(regs); i++) {
1da177e4
LT
667 printk("... APIC #%d %s: ", apicid, names[i]);
668
669 /*
670 * Wait for idle.
671 */
672 apic_wait_icr_idle();
673
674 apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
675 apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
676
677 timeout = 0;
678 do {
679 udelay(100);
680 status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
681 } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
682
683 switch (status) {
684 case APIC_ICR_RR_VALID:
685 status = apic_read(APIC_RRR);
686 printk("%08x\n", status);
687 break;
688 default:
689 printk("failed\n");
690 }
691 }
692}
693#endif

#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int timeout, maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */

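/*
 * The INIT-based path below is the MP-spec "universal start-up
 * algorithm": assert a level-triggered INIT IPI to reset the target,
 * hold it for 10ms, deassert, then (only for integrated APICs) send up
 * to two STARTUP IPIs. The STARTUP vector field carries start_eip >> 12,
 * which is why the trampoline must sit on a 4K page boundary: the AP
 * starts executing in real mode at exactly that page.
 */
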
#ifdef WAKE_SECONDARY_VIA_INIT
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			Dprintk("+");
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */

extern cpumask_t cpu_initialized;
static inline int alloc_cpu_id(void)
{
	cpumask_t tmp_map;
	int cpu;

	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);
	if (cpu >= NR_CPUS)
		return -ENODEV;
	return cpu;
}
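
/*
 * alloc_cpu_id() hands out the lowest logical CPU number not already in
 * cpu_present_map. Since do_boot_cpu() marks a CPU present on success
 * and offlining does not clear that bit, a re-onlined CPU keeps its old
 * number (and, below, its old idle task); only never-seen CPUs get a
 * fresh slot.
 */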

#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct * __devinitdata cpu_idle_tasks[NR_CPUS];
static inline struct task_struct * alloc_idle_task(int cpu)
{
	struct task_struct *idle;

	if ((idle = cpu_idle_tasks[cpu]) != NULL) {
		/* initialize the thread_struct; we really want to
		 * avoid destroying the idle thread
		 */
		idle->thread.esp = (unsigned long)task_pt_regs(idle);
		init_idle(idle, cpu);
		return idle;
	}
	idle = fork_idle(cpu);

	if (!IS_ERR(idle))
		cpu_idle_tasks[cpu] = idle;
	return idle;
}
#else
#define alloc_idle_task(cpu) fork_idle(cpu)
#endif

static int __devinit do_boot_cpu(int apicid, int cpu)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout;
	unsigned long start_eip;
	unsigned short nmi_high = 0, nmi_low = 0;

	++cpucount;
	alternatives_smp_switch(1);

	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = alloc_idle_task(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	idle->thread.eip = (unsigned long) start_secondary;
	/* start_eip had better be page-aligned! */
	start_eip = setup_trampoline();

	/* So we see what's up   */
	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.esp = (void *) idle->thread.esp;

	irq_ctx_init(cpu);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_eip);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_eip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk("CPU%d: ", cpu);
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpucount--;
	} else {
		x86_cpu_to_apicid[cpu] = apicid;
		cpu_set(cpu, cpu_present_map);
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}
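
/*
 * About the 0xA5 test above: the first thing the trampoline code does
 * is store an 0xA5-pattern marker at its own base, so finding it there
 * after the timeout means the AP at least began fetching instructions
 * ("Stuck ??"), while anything else means the STARTUP IPI never took
 * ("Not responding"). The marker is cleared afterwards so the next boot
 * attempt starts from a clean slate.
 */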

#ifdef CONFIG_HOTPLUG_CPU
void cpu_exit_clear(void)
{
	int cpu = raw_smp_processor_id();

	idle_task_exit();

	cpucount--;
	cpu_uninit();
	irq_ctx_exit(cpu);

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);

	cpu_clear(cpu, smp_commenced_mask);
	unmap_cpu_to_logical_apicid(cpu);
}

struct warm_boot_cpu_info {
	struct completion *complete;
	int apicid;
	int cpu;
};

static void __cpuinit do_warm_boot_cpu(void *p)
{
	struct warm_boot_cpu_info *info = p;
	do_boot_cpu(info->apicid, info->cpu);
	complete(info->complete);
}

static int __cpuinit __smp_prepare_cpu(int cpu)
{
	DECLARE_COMPLETION(done);
	struct warm_boot_cpu_info info;
	struct work_struct task;
	int apicid, ret;
	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);

	apicid = x86_cpu_to_apicid[cpu];
	if (apicid == BAD_APICID) {
		ret = -ENODEV;
		goto exit;
	}

	/*
	 * The CPU wasn't initialized at boot time; allocate its GDT table
	 * here. cpu_init() will initialize it.
	 */
	if (!cpu_gdt_descr->address) {
		cpu_gdt_descr->address = get_zeroed_page(GFP_KERNEL);
		if (!cpu_gdt_descr->address) {
			printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
			ret = -ENOMEM;
			goto exit;
		}
	}

	info.complete = &done;
	info.apicid = apicid;
	info.cpu = cpu;
	INIT_WORK(&task, do_warm_boot_cpu, &info);

	tsc_sync_disabled = 1;

	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			KERNEL_PGD_PTRS);
	flush_tlb_all();
	schedule_work(&task);
	wait_for_completion(&done);

	tsc_sync_disabled = 0;
	zap_low_mappings();
	ret = 0;
exit:
	return ret;
}
#endif

static void smp_tune_scheduling(void)
{
	unsigned long cachesize;       /* kB   */
	unsigned long bandwidth = 350; /* MB/s */
	/*
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 *  CPU almost always at wakeup time (this is due to the small
	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
	 *  the cache size)
	 */

	if (!cpu_khz) {
		/*
		 * this basically disables processor-affinity
		 * scheduling on SMP without a TSC.
		 */
		return;
	} else {
		cachesize = boot_cpu_data.x86_cache_size;
		if (cachesize == -1) {
			cachesize = 16;	/* Pentiums, 2x8kB cache */
			bandwidth = 100;
		}
		max_cache_size = cachesize * 1024;
	}
}
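
/*
 * A rough worked example: a CPU reporting a 512 kB cache yields
 * max_cache_size = 512 * 1024 = 524288 bytes, which the scheduler can
 * use when estimating the cost of migrating a cache-hot task; with no
 * reported size we assume 16 kB (a Pentium's split 2x8 kB L1) and a
 * lower memory bandwidth.
 */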

/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;
#ifdef CONFIG_X86_NUMAQ
EXPORT_SYMBOL(xquad_portio);
#endif

static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();
	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();

	set_cpu_sibling_map(0);

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	setup_portio_remap();

	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		if (max_cpus <= cpucount+1)
			continue;

		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
			printk("CPU #%d not responding - cannot use it.\n",
								apicid);
		else
			++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");

	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpucount)
			printk(KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}

	Dprintk("Boot done.\n");

	/*
	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus_clear(cpu_sibling_map[cpu]);
		cpus_clear(cpu_core_map[cpu]);
	}

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	smpboot_setup_io_apic();

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
}

/* These are wrappers to interface to the new boot process. Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	smp_commenced_mask = cpumask_of_cpu(0);
	cpu_callin_map = cpumask_of_cpu(0);
	mb();
	smp_boot_cpus(max_cpus);
}

void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
	cpu_set(smp_processor_id(), cpu_present_map);
	cpu_set(smp_processor_id(), cpu_possible_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
static void
remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = cpu_data;

	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
		cpu_clear(cpu, cpu_core_map[sibling]);
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
			c[sibling].booted_cores--;
	}

	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
		cpu_clear(cpu, cpu_sibling_map[sibling]);
	cpus_clear(cpu_sibling_map[cpu]);
	cpus_clear(cpu_core_map[cpu]);
	phys_proc_id[cpu] = BAD_APICID;
	cpu_core_id[cpu] = BAD_APICID;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

int __cpu_disable(void)
{
	cpumask_t map = cpu_online_map;
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	clear_local_APIC();
	/* Allow any queued timer interrupts to get serviced */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	remove_siblinginfo(cpu);

	cpu_clear(cpu, map);
	fixup_irqs(map);
	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk("CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
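
/*
 * The offline path in short: __cpu_disable() runs on the dying CPU with
 * interrupts off, migrates its IRQs away via fixup_irqs() and drops the
 * CPU from cpu_online_map; the idle task then fakes death in play_dead()
 * and advertises CPU_DEAD through per_cpu(cpu_state), which __cpu_die()
 * polls for roughly a second from another CPU before complaining.
 */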

int __devinit __cpu_up(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	int ret = 0;

	/*
	 * We do warm boot only on cpus that had booted earlier.
	 * Otherwise cold boot is all handled from smp_boot_cpus().
	 * cpu_callin_map is set during the AP kickstart process. It's
	 * reset when a cpu is taken offline from cpu_exit_clear().
	 */
	if (!cpu_isset(cpu, cpu_callin_map))
		ret = __smp_prepare_cpu(cpu);

	if (ret)
		return -EIO;
#endif

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
		local_irq_enable();
		return -EIO;
	}

	local_irq_enable();
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	zap_low_mappings();
#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Disable executability of the SMP trampoline:
	 */
	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
#endif
}

void __init smp_intr_init(void)
{
	/*
	 * IRQ0 must be given a fixed assignment and initialized,
	 * because it's used before the IO-APIC is set up.
	 */
	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}