arch/x86/kernel/smpboot_32.c
/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 *
 * Fixes
 *	Felix Koop		: NR_CPUS used properly
 *	Jose Renau		: Handle single CPU case.
 *	Alan Cox		: By repeated request 8) - Total BogoMIPS report.
 *	Greg Wright		: Fix for kernel stacks panic.
 *	Erich Boleyn		: MP v1.4 and additional changes.
 *	Matthias Sattler	: Changes for 2.1 kernel map.
 *	Michel Lespinasse	: Changes for 2.1 kernel map.
 *	Michael Chastain	: Change trampoline.S to gnu as.
 *	Alan Cox		: Dumb bug: 'B' step PPro's are fine
 *	Ingo Molnar		: Added APIC timers, based on code
 *				  from Jose Renau
 *	Ingo Molnar		: various cleanups and rewrites
 *	Tigran Aivazian		: fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	: Bits for genuine 82489DX APICs
 *	Martin J. Bligh		: Added support for multi-quad systems
 *	Dave Jones		: Report invalid combinations of Athlon CPUs.
 *	Rusty Russell		: Hacked into shape for new "hotplug" boot process.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/nmi.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>
#include <asm/nmi.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
#include <asm/vmi.h>
#include <asm/mtrr.h>

/* Set if we find a B stepping CPU */
static int __cpuinitdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/*
 * The following static array is used during kernel startup
 * and the x86_cpu_to_apicid_ptr contains the address of the
 * array during this time.  It is zeroed when the per_cpu
 * data area is removed.
 */
u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
			{ [0 ... NR_CPUS-1] = BAD_APICID };
void *x86_cpu_to_apicid_ptr;
DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);

u8 apicid_2_node[MAX_APICID];

/*
 * Trampoline 80x86 program as an array.
 */

extern const unsigned char trampoline_data[];
extern const unsigned char trampoline_end[];
static unsigned char *trampoline_base;
static int trampoline_exec;

static void map_cpu_to_logical_apicid(void);

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __cpuinit setup_trampoline(void)
{
        memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
        return virt_to_phys(trampoline_base);
}

/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
        trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
        /*
         * Has to be in very low memory so we can execute
         * real-mode AP code.
         */
        if (__pa(trampoline_base) >= 0x9F000)
                BUG();
        /*
         * Make the SMP trampoline executable:
         */
        trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
}
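
/*
 * Illustration (added, not in the original file): APs begin execution in
 * real mode, where only the first megabyte of physical memory is usable,
 * so the trampoline page must sit in conventional memory. The 0x9F000
 * bound above keeps the whole page below the 640 KB mark:
 *
 *	0x9F000 + PAGE_SIZE (0x1000) == 0xA0000 == 640 KB,
 *
 * which is the start of the VGA/BIOS reserved area; a page based at or
 * above 0x9F000 could spill past the end of usable low memory.
 */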

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

void __cpuinit smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data(id);

        *c = boot_cpu_data;
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86_vendor == X86_VENDOR_INTEL &&
            c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3)
                /*
                 * Remember we have B step Pentia with bugs
                 */
                smp_b_stepping = 1;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

                if (num_possible_cpus() == 1)
                        goto valid_k7;

                /* Athlon 660/661 is valid. */
                if ((c->x86_model == 6) && ((c->x86_mask == 0) || (c->x86_mask == 1)))
                        goto valid_k7;

                /* Duron 670 is valid */
                if ((c->x86_model == 7) && (c->x86_mask == 0))
                        goto valid_k7;

                /*
                 * Athlon 662, Duron 671, and Athlon >model 7 have the MP
                 * capability bit. It's worth noting that the A5 stepping
                 * (662) of some Athlon XPs has the MP bit set.
                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
                 */
                if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
                    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
                    (c->x86_model > 7))
                        if (cpu_has_mp)
                                goto valid_k7;

                /* If we get here, it's not a certified SMP capable AMD system. */
                add_taint(TAINT_UNSAFE_SMP);
        }

valid_k7:
        ;
}
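
/*
 * Illustration (added, not in the original file): the family/model/stepping
 * fields tested above come straight out of CPUID leaf 1. A minimal sketch
 * of the decoding, assuming the basic (pre extended-family) layout these
 * old parts use:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *
 *	cpuid(1, &eax, &ebx, &ecx, &edx);
 *	family   = (eax >> 8) & 0xf;	<- c->x86
 *	model    = (eax >> 4) & 0xf;	<- c->x86_model
 *	stepping = eax & 0xf;		<- c->x86_mask
 *
 * An Athlon "662" is thus family 6, model 6, stepping 2: exactly what the
 * (c->x86_model == 6) && (c->x86_mask >= 2) test above matches.
 */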

extern void calibrate_delay(void);

static atomic_t init_deasserted;

static void __cpuinit smp_callin(void)
{
        int cpuid, phys_id;
        unsigned long timeout;

        /*
         * If woken up by an INIT in an 82489DX configuration
         * we may get here before an INIT-deassert IPI reaches
         * our local APIC. We have to wait for the IPI or we'll
         * lock up on an APIC access.
         */
        wait_for_init_deassert(&init_deasserted);

        /*
         * (This works even if the APIC is not enabled.)
         */
        phys_id = GET_APIC_ID(apic_read(APIC_ID));
        cpuid = smp_processor_id();
        if (cpu_isset(cpuid, cpu_callin_map)) {
                printk("huh, phys CPU#%d, CPU#%d already present??\n",
                        phys_id, cpuid);
                BUG();
        }
        Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

        /*
         * STARTUP IPIs are fragile beasts as they might sometimes
         * trigger some glue motherboard logic. Complete APIC bus
         * silence for 1 second; this overestimates, by a factor of
         * two, the time the boot CPU spends sending the up to 2
         * STARTUP IPIs. This should be enough.
         */

        /*
         * Waiting 2s total for startup (udelay is not yet working)
         */
        timeout = jiffies + 2*HZ;
        while (time_before(jiffies, timeout)) {
                /*
                 * Has the boot CPU finished its STARTUP sequence?
                 */
                if (cpu_isset(cpuid, cpu_callout_map))
                        break;
                rep_nop();
        }

        if (!time_before(jiffies, timeout)) {
                printk("BUG: CPU%d started up but did not get a callout!\n",
                        cpuid);
                BUG();
        }

        /*
         * The boot CPU has finished the init stage and is spinning
         * on callin_map until we finish. We are free to set up this
         * CPU, first the APIC. (this is probably redundant on most
         * boards)
         */

        Dprintk("CALLIN, before setup_local_APIC().\n");
        smp_callin_clear_local_apic();
        setup_local_APIC();
        map_cpu_to_logical_apicid();

        /*
         * Get our bogomips.
         */
        calibrate_delay();
        Dprintk("Stack at about %p\n", &cpuid);

        /*
         * Save our processor parameters
         */
        smp_store_cpu_info(cpuid);

        /*
         * Allow the master to continue.
         */
        cpu_set(cpuid, cpu_callin_map);
}
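
/*
 * Illustration (added, not in the original file): the callout/callin
 * handshake above is a two-sided rendezvous on plain cpumasks. Roughly,
 * with time flowing downwards:
 *
 *	BSP (do_boot_cpu)			AP (smp_callin)
 *	-----------------			---------------
 *	send INIT/STARTUP IPIs
 *	cpu_set(cpu, cpu_callout_map)   --->	spin until present in
 *						cpu_callout_map (2 s limit)
 *	spin up to 5 s waiting for	<---	cpu_set(cpuid, cpu_callin_map)
 *	cpu in cpu_callin_map
 *
 * Both sides poll with rep_nop()/udelay() because the AP can neither take
 * interrupts nor sleep this early in its life.
 */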

static int cpucount;

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        /*
         * For performance, we return the last-level-cache shared map;
         * for power savings, we return cpu_core_map.
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
                return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
}

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

void __cpuinit set_cpu_sibling_map(int cpu)
{
        int i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        cpu_set(cpu, cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                cpu_set(i, per_cpu(cpu_core_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c->llc_shared_map);
                                cpu_set(cpu, cpu_data(i).llc_shared_map);
                        }
                }
        } else {
                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }

        cpu_set(cpu, c->llc_shared_map);

        if (current_cpu_data.x86_max_cores == 1) {
                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                c->booted_cores = 1;
                return;
        }

        for_each_cpu_mask(i, cpu_sibling_setup_map) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpu_set(i, c->llc_shared_map);
                        cpu_set(cpu, cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpu_set(i, per_cpu(cpu_core_map, cpu));
                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         * Does this new cpu bring up a new core?
                         */
                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                        c->booted_cores++;
                                /*
                                 * increment the core count for all
                                 * the other cpus in this package
                                 */
                                if (i != cpu)
                                        cpu_data(i).booted_cores++;
                        } else if (i != cpu && !c->booted_cores)
                                c->booted_cores = cpu_data(i).booted_cores;
                }
        }
}
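
/*
 * Illustration (added, not in the original file): a minimal sketch of how
 * the maps built above are typically consumed, e.g. counting the hardware
 * threads that share a core with a given cpu. do_something() is a
 * hypothetical consumer:
 *
 *	int sibling, nr_threads = 0;
 *
 *	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) {
 *		nr_threads++;
 *		if (sibling != cpu)
 *			do_something(sibling);
 *	}
 *
 * On a non-HT part each cpu_sibling_map contains only the cpu itself,
 * so nr_threads ends up as 1.
 */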

/*
 * Activate a secondary processor.
 */
static void __cpuinit start_secondary(void *unused)
{
        /*
         * Don't put *anything* before cpu_init(); SMP booting is so
         * fragile that we want to limit the things done here to the
         * most necessary things.
         */
#ifdef CONFIG_VMI
        vmi_bringup();
#endif
        cpu_init();
        preempt_disable();
        smp_callin();
        while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
                rep_nop();
        /*
         * Check TSC synchronization with the BP:
         */
        check_tsc_sync_target();

        setup_secondary_clock();
        if (nmi_watchdog == NMI_IO_APIC) {
                disable_8259A_irq(0);
                enable_NMI_through_LVT0(NULL);
                enable_8259A_irq(0);
        }
        /*
         * low-memory mappings have been cleared, flush them from
         * the local TLBs too.
         */
        local_flush_tlb();

        /* This must be done before setting cpu_online_map */
        set_cpu_sibling_map(raw_smp_processor_id());
        wmb();

        /*
         * We need to hold call_lock, so there is no inconsistency
         * between the time smp_call_function() determines the number
         * of IPI recipients and the time the determination is made
         * for which cpus receive the IPI. Holding this lock helps
         * us to not include this cpu in a currently in-progress
         * smp_call_function().
         */
        lock_ipi_call_lock();
        cpu_set(smp_processor_id(), cpu_online_map);
        unlock_ipi_call_lock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();
        cpu_idle();
}

/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure.
 * This function must not return.
 */
void __devinit initialize_secondary(void)
{
        /*
         * We don't actually need to load the full TSS,
         * basically just the stack pointer and the eip.
         */

        asm volatile(
                "movl %0,%%esp\n\t"
                "jmp *%1"
                :
                :"m" (current->thread.esp), "m" (current->thread.eip));
}
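
/*
 * Illustration (added, not in the original file): the asm above is,
 * informally, the C-untypable equivalent of
 *
 *	esp = current->thread.esp;	(switch to the idle task's stack)
 *	goto *current->thread.eip;	(jump to start_secondary)
 *
 * where thread.esp and thread.eip were filled in by do_boot_cpu() before
 * the AP was kicked. It never returns, which is why the comment above
 * insists on that.
 */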

/* Static state in head.S used to set up a CPU */
extern struct {
        void *esp;
        unsigned short ss;
} stack_start;

#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
cpumask_t node_2_cpu_mask[MAX_NUMNODES] __read_mostly =
                                { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_2_cpu_mask);
/* which node each logical CPU is on */
int cpu_2_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);

/* set up a mapping between cpu and node. */
static inline void map_cpu_to_node(int cpu, int node)
{
        printk("Mapping cpu %d to node %d\n", cpu, node);
        cpu_set(cpu, node_2_cpu_mask[node]);
        cpu_2_node[cpu] = node;
}

/* undo a mapping between cpu and node. */
static inline void unmap_cpu_to_node(int cpu)
{
        int node;

        printk("Unmapping cpu %d from all nodes\n", cpu);
        for (node = 0; node < MAX_NUMNODES; node++)
                cpu_clear(cpu, node_2_cpu_mask[node]);
        cpu_2_node[cpu] = 0;
}
#else /* !CONFIG_NUMA */

#define map_cpu_to_node(cpu, node) ({})
#define unmap_cpu_to_node(cpu) ({})

#endif /* CONFIG_NUMA */

u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };

static void map_cpu_to_logical_apicid(void)
{
        int cpu = smp_processor_id();
        int apicid = logical_smp_processor_id();
        int node = apicid_to_node(apicid);

        if (!node_online(node))
                node = first_online_node;

        cpu_2_logical_apicid[cpu] = apicid;
        map_cpu_to_node(cpu, node);
}

static void unmap_cpu_to_logical_apicid(int cpu)
{
        cpu_2_logical_apicid[cpu] = BAD_APICID;
        unmap_cpu_to_node(cpu);
}

static inline void __inquire_remote_apic(int apicid)
{
        int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        char *names[] = { "ID", "VERSION", "SPIV" };
        int timeout;
        unsigned long status;

        printk("Inquiring remote APIC #%d...\n", apicid);

        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                printk("... APIC #%d %s: ", apicid, names[i]);

                /*
                 * Wait for idle.
                 */
                status = safe_apic_wait_icr_idle();
                if (status)
                        printk("a previous APIC delivery may have failed\n");

                apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
                apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

                timeout = 0;
                do {
                        udelay(100);
                        status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
                } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

                switch (status) {
                case APIC_ICR_RR_VALID:
                        status = apic_read(APIC_RRR);
                        printk("%lx\n", status);
                        break;
                default:
                        printk("failed\n");
                }
        }
}

#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc later.
 */
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
        unsigned long send_status, accept_status = 0;
        int maxlvt;

        /* Target chip */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

        /* Boot on the stack */
        /* Kick the second */
        apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

        Dprintk("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        /*
         * Give the other CPU some time to accept the IPI.
         */
        udelay(200);
        /*
         * Due to the Pentium erratum 3AP.
         */
        maxlvt = lapic_get_maxlvt();
        if (maxlvt > 3) {
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
        }
        accept_status = (apic_read(APIC_ESR) & 0xEF);
        Dprintk("NMI sent.\n");

        if (send_status)
                printk("APIC never delivered???\n");
        if (accept_status)
                printk("APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
#endif /* WAKE_SECONDARY_VIA_NMI */

#ifdef WAKE_SECONDARY_VIA_INIT
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
        unsigned long send_status, accept_status = 0;
        int maxlvt, num_starts, j;

        /*
         * Be paranoid about clearing APIC errors.
         */
        if (APIC_INTEGRATED(apic_version[phys_apicid])) {
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
        }

        Dprintk("Asserting INIT.\n");

        /*
         * Turn INIT on target chip
         */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /*
         * Send IPI
         */
        apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
                                | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        mdelay(10);

        Dprintk("Deasserting INIT.\n");

        /* Target chip */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /* Send IPI */
        apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        atomic_set(&init_deasserted, 1);

        /*
         * Should we send STARTUP IPIs?
         *
         * Determine this based on the APIC version.
         * If we don't have an integrated APIC, don't send the STARTUP IPIs.
         */
        if (APIC_INTEGRATED(apic_version[phys_apicid]))
                num_starts = 2;
        else
                num_starts = 0;

        /*
         * Paravirt / VMI wants a startup IPI hook here to set up the
         * target processor state.
         */
        startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
                         (unsigned long) stack_start.esp);

        /*
         * Run STARTUP IPI loop.
         */
        Dprintk("#startup loops: %d.\n", num_starts);

        maxlvt = lapic_get_maxlvt();

        for (j = 1; j <= num_starts; j++) {
                Dprintk("Sending STARTUP #%d.\n", j);
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
                Dprintk("After apic_write.\n");

                /*
                 * STARTUP IPI
                 */

                /* Target chip */
                apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

                /* Boot on the stack */
                /* Kick the second */
                apic_write_around(APIC_ICR, APIC_DM_STARTUP
                                        | (start_eip >> 12));

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(300);

                Dprintk("Startup point 1.\n");

                Dprintk("Waiting for send to finish...\n");
                send_status = safe_apic_wait_icr_idle();

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(200);
                /*
                 * Due to the Pentium erratum 3AP.
                 */
                if (maxlvt > 3) {
                        apic_read_around(APIC_SPIV);
                        apic_write(APIC_ESR, 0);
                }
                accept_status = (apic_read(APIC_ESR) & 0xEF);
                if (send_status || accept_status)
                        break;
        }
        Dprintk("After Startup.\n");

        if (send_status)
                printk("APIC never delivered???\n");
        if (accept_status)
                printk("APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
#endif /* WAKE_SECONDARY_VIA_INIT */
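
/*
 * Illustration (added, not in the original file): the (start_eip >> 12)
 * above is the STARTUP IPI "vector" field. The architecture defines that
 * an AP receiving a SIPI with vector VV starts executing in real mode at
 * physical address VV << 12 (i.e. 0x000VV000). Worked example, assuming
 * the trampoline page landed at 0x9000:
 *
 *	start_eip       = 0x9000
 *	start_eip >> 12 = 0x09			(vector programmed into the ICR)
 *	AP entry point  = 0x09 << 12 = 0x9000
 *
 * This is also why setup_trampoline() must hand back a page-aligned
 * physical address: the low 12 bits simply cannot be expressed.
 */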

extern cpumask_t cpu_initialized;
static inline int alloc_cpu_id(void)
{
        cpumask_t tmp_map;
        int cpu;
        cpus_complement(tmp_map, cpu_present_map);
        cpu = first_cpu(tmp_map);
        if (cpu >= NR_CPUS)
                return -ENODEV;
        return cpu;
}

#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS];
static inline struct task_struct * __cpuinit alloc_idle_task(int cpu)
{
        struct task_struct *idle;

        if ((idle = cpu_idle_tasks[cpu]) != NULL) {
                /* initialize thread_struct; we really want to avoid
                 * destroying the idle thread
                 */
                idle->thread.esp = (unsigned long)task_pt_regs(idle);
                init_idle(idle, cpu);
                return idle;
        }
        idle = fork_idle(cpu);

        if (!IS_ERR(idle))
                cpu_idle_tasks[cpu] = idle;
        return idle;
}
#else
#define alloc_idle_task(cpu) fork_idle(cpu)
#endif

static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (i.e. clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
        struct task_struct *idle;
        unsigned long boot_error;
        int timeout;
        unsigned long start_eip;
        unsigned short nmi_high = 0, nmi_low = 0;

        /*
         * Save current MTRR state in case it was changed since early boot
         * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
         */
        mtrr_save_state();

        /*
         * We can't use kernel_thread since we must avoid
         * rescheduling the child.
         */
        idle = alloc_idle_task(cpu);
        if (IS_ERR(idle))
                panic("failed fork for CPU %d", cpu);

        init_gdt(cpu);
        per_cpu(current_task, cpu) = idle;
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);

        idle->thread.eip = (unsigned long) start_secondary;
        /* start_eip had better be page-aligned! */
        start_eip = setup_trampoline();

        ++cpucount;
        alternatives_smp_switch(1);

        /* So we see what's up */
        printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
        /* Stack for startup_32 can be just as for start_secondary onwards */
        stack_start.esp = (void *) idle->thread.esp;

        irq_ctx_init(cpu);

        per_cpu(x86_cpu_to_apicid, cpu) = apicid;
        /*
         * This grunge runs the startup process for
         * the targeted processor.
         */

        atomic_set(&init_deasserted, 0);

        Dprintk("Setting warm reset code and vector.\n");

        store_NMI_vector(&nmi_high, &nmi_low);

        smpboot_setup_warm_reset_vector(start_eip);

        /*
         * Starting actual IPI sequence...
         */
        boot_error = wakeup_secondary_cpu(apicid, start_eip);

        if (!boot_error) {
                /*
                 * allow APs to start initializing.
                 */
                Dprintk("Before Callout %d.\n", cpu);
                cpu_set(cpu, cpu_callout_map);
                Dprintk("After Callout %d.\n", cpu);

                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
                        if (cpu_isset(cpu, cpu_callin_map))
                                break;	/* It has booted */
                        udelay(100);
                }

                if (cpu_isset(cpu, cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("OK.\n");
                        printk("CPU%d: ", cpu);
                        print_cpu_info(&cpu_data(cpu));
                        Dprintk("CPU has booted.\n");
                } else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)trampoline_base)
                                        == 0xA5)
                                /* trampoline started but...? */
                                printk("Stuck ??\n");
                        else
                                /* trampoline code not run */
                                printk("Not responding.\n");
                        inquire_remote_apic(apicid);
                }
        }

        if (boot_error) {
                /* Try to put things back the way they were before ... */
                unmap_cpu_to_logical_apicid(cpu);
                cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
                cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
                cpucount--;
        } else {
                per_cpu(x86_cpu_to_apicid, cpu) = apicid;
                cpu_set(cpu, cpu_present_map);
        }

        /* mark "stuck" area as not stuck */
        *((volatile unsigned long *)trampoline_base) = 0;

        return boot_error;
}
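
/*
 * Illustration (added, not in the original file): a sketch of why
 * do_boot_cpu() programs the "warm reset vector". On a warm reset the
 * BIOS consults the CMOS shutdown status byte; with the right shutdown
 * code it resumes via the far pointer stored at 0x40:0x67 in the BIOS
 * data area instead of running POST. smpboot_setup_warm_reset_vector()
 * can be expected to do, roughly (hypothetical sketch, see
 * smpboot_hooks.h for the real thing):
 *
 *	CMOS_WRITE(0xa, 0xf);				shutdown code
 *	*((volatile unsigned short *)phys_to_virt(0x469)) = start_eip >> 4;
 *	*((volatile unsigned short *)phys_to_virt(0x467)) = start_eip & 0xf;
 *
 * so a CPU coming out of INIT on pre-integrated (82489DX-era) APICs,
 * which never sees a STARTUP IPI, still lands in the trampoline.
 */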

#ifdef CONFIG_HOTPLUG_CPU
void cpu_exit_clear(void)
{
        int cpu = raw_smp_processor_id();

        idle_task_exit();

        cpucount--;
        cpu_uninit();
        irq_ctx_exit(cpu);

        cpu_clear(cpu, cpu_callout_map);
        cpu_clear(cpu, cpu_callin_map);

        cpu_clear(cpu, smp_commenced_mask);
        unmap_cpu_to_logical_apicid(cpu);
}

struct warm_boot_cpu_info {
        struct completion *complete;
        struct work_struct task;
        int apicid;
        int cpu;
};

static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
{
        struct warm_boot_cpu_info *info =
                container_of(work, struct warm_boot_cpu_info, task);
        do_boot_cpu(info->apicid, info->cpu);
        complete(info->complete);
}

static int __cpuinit __smp_prepare_cpu(int cpu)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct warm_boot_cpu_info info;
        int apicid, ret;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        if (apicid == BAD_APICID) {
                ret = -ENODEV;
                goto exit;
        }

        info.complete = &done;
        info.apicid = apicid;
        info.cpu = cpu;
        INIT_WORK(&info.task, do_warm_boot_cpu);

        /* init low mem mapping */
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
                        min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
        flush_tlb_all();
        schedule_work(&info.task);
        wait_for_completion(&done);

        zap_low_mappings();
        ret = 0;
exit:
        return ret;
}
#endif
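
/*
 * Illustration (added, not in the original file): why __smp_prepare_cpu()
 * clones the low PGD entries before rebooting the AP. The trampoline and
 * early head.S code run at identity addresses (virtual == physical, below
 * 1 MB) before switching to the kernel mapping, so the boot-time identity
 * mapping that zap_low_mappings() removed must be restored temporarily:
 *
 *	clone_pgd_range(dst, src, count)	copy 'count' pgd slots, here
 *						kernel half -> low/user half
 *	flush_tlb_all()				make the mapping visible
 *	... AP boots through the trampoline ...
 *	zap_low_mappings()			tear the identity map back down
 *
 * This restates the sequence already in the code above; it is the
 * author's "init low mem mapping" comment spelled out.
 */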

/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;
#ifdef CONFIG_X86_NUMAQ
EXPORT_SYMBOL(xquad_portio);
#endif

static void __init smp_boot_cpus(unsigned int max_cpus)
{
        int apicid, cpu, bit, kicked;
        unsigned long bogosum = 0;

        /*
         * Setup boot CPU information
         */
        smp_store_cpu_info(0); /* Final full version of the data */
        printk("CPU%d: ", 0);
        print_cpu_info(&cpu_data(0));

        boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
        boot_cpu_logical_apicid = logical_smp_processor_id();
        per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;

        current_thread_info()->cpu = 0;

        set_cpu_sibling_map(0);

        /*
         * If we couldn't find an SMP configuration at boot time,
         * get out of here now!
         */
        if (!smp_found_config && !acpi_lapic) {
                printk(KERN_NOTICE "SMP motherboard not detected.\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
                if (APIC_init_uniprocessor())
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
                map_cpu_to_logical_apicid();
                cpu_set(0, per_cpu(cpu_sibling_map, 0));
                cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }

        /*
         * Should not be necessary because the MP table should list the boot
         * CPU too, but we do it for the sake of robustness anyway.
         * Makes no sense to do this check in clustered apic mode, so skip it
         */
        if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
                printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
                                boot_cpu_physical_apicid);
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }

        /*
         * If we couldn't find a local APIC, then get out of here now!
         */
        if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
                printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
                        boot_cpu_physical_apicid);
                printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
                map_cpu_to_logical_apicid();
                cpu_set(0, per_cpu(cpu_sibling_map, 0));
                cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }

        verify_local_APIC();

        /*
         * If SMP should be disabled, then really disable it!
         */
        if (!max_cpus) {
                smp_found_config = 0;
                printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");

                if (nmi_watchdog == NMI_LOCAL_APIC) {
                        printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n");
                        connect_bsp_APIC();
                        setup_local_APIC();
                }
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
                map_cpu_to_logical_apicid();
                cpu_set(0, per_cpu(cpu_sibling_map, 0));
                cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }

        connect_bsp_APIC();
        setup_local_APIC();
        map_cpu_to_logical_apicid();

        setup_portio_remap();

        /*
         * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
         *
         * In clustered apic mode, phys_cpu_present_map is constructed thus:
         * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
         * clustered apic ID.
         */
        Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

        kicked = 1;
        for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
                apicid = cpu_present_to_apicid(bit);
                /*
                 * Don't even attempt to start the boot CPU!
                 */
                if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
                        continue;

                if (!check_apicid_present(bit))
                        continue;
                if (max_cpus <= cpucount+1)
                        continue;

                if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
                        printk("CPU #%d not responding - cannot use it.\n",
                                                                apicid);
                else
                        ++kicked;
        }

        /*
         * Cleanup possible dangling ends...
         */
        smpboot_restore_warm_reset_vector();

        /*
         * Allow the user to impress friends.
         */
        Dprintk("Before bogomips.\n");
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_isset(cpu, cpu_callout_map))
                        bogosum += cpu_data(cpu).loops_per_jiffy;
        printk(KERN_INFO
                "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                cpucount+1,
                bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);
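
        /*
         * Illustration (added, not in the original comments): the printk
         * above does fixed-point formatting of BogoMIPS, where a CPU's
         * BogoMIPS value is loops_per_jiffy * HZ / 500000. Worked example,
         * assuming HZ = 250 and a summed loops_per_jiffy of 9,961,472:
         *
         *	bogosum / (500000/HZ) = 9961472 / 2000 = 4980	(integer part)
         *	bogosum / (5000/HZ)   = 9961472 / 20   = 498073
         *	498073 % 100          = 73			(two decimals)
         *
         * giving "4980.73 BogoMIPS" without any floating point.
         */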

        Dprintk("Before bogocount - setting activated=1.\n");

        if (smp_b_stepping)
                printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

        /*
         * Don't taint if we are running an SMP kernel on a single non-MP
         * approved Athlon
         */
        if (tainted & TAINT_UNSAFE_SMP) {
                if (cpucount)
                        printk(KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
                else
                        tainted &= ~TAINT_UNSAFE_SMP;
        }

        Dprintk("Boot done.\n");

        /*
         * construct cpu_sibling_map, so that we can tell sibling CPUs
         * efficiently.
         */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                cpus_clear(per_cpu(cpu_sibling_map, cpu));
                cpus_clear(per_cpu(cpu_core_map, cpu));
        }

        cpu_set(0, per_cpu(cpu_sibling_map, 0));
        cpu_set(0, per_cpu(cpu_core_map, 0));

        smpboot_setup_io_apic();

        setup_boot_clock();
}

/* These are wrappers to interface to the new boot process. Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
        smp_commenced_mask = cpumask_of_cpu(0);
        cpu_callin_map = cpumask_of_cpu(0);
        mb();
        smp_boot_cpus(max_cpus);
}

void __init native_smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        init_gdt(cpu);
        switch_to_new_gdt();

        cpu_set(cpu, cpu_online_map);
        cpu_set(cpu, cpu_callout_map);
        cpu_set(cpu, cpu_present_map);
        cpu_set(cpu, cpu_possible_map);
        __get_cpu_var(cpu_state) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
void remove_siblinginfo(int cpu)
{
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                        cpu_data(sibling).booted_cores--;
        }

        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
}

int __cpu_disable(void)
{
        cpumask_t map = cpu_online_map;
        int cpu = smp_processor_id();

        /*
         * Perhaps use cpufreq to drop frequency, but that could go
         * into generic code.
         *
         * We won't take down the boot processor on i386 due to some
         * interrupts only being able to be serviced by the BSP.
         * Especially so if we're not using an IOAPIC	-zwane
         */
        if (cpu == 0)
                return -EBUSY;
        if (nmi_watchdog == NMI_LOCAL_APIC)
                stop_apic_nmi_watchdog(NULL);
        clear_local_APIC();
        /* Allow any queued timer interrupts to get serviced */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();

        remove_siblinginfo(cpu);

        cpu_clear(cpu, map);
        fixup_irqs(map);
        /* It's now safe to remove this processor from the online map */
        cpu_clear(cpu, cpu_online_map);
        return 0;
}

void __cpu_die(unsigned int cpu)
{
        /* We don't do anything here: idle task is faking death itself. */
        unsigned int i;

        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        printk("CPU %d is now offline\n", cpu);
                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

int __cpuinit native_cpu_up(unsigned int cpu)
{
        unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
        int ret = 0;

        /*
         * We do warm boot only on cpus that have booted earlier;
         * cold boot is all handled from smp_boot_cpus().
         * cpu_callin_map is set during the AP kickstart process. It is
         * reset when a cpu is taken offline in cpu_exit_clear().
         */
        if (!cpu_isset(cpu, cpu_callin_map))
                ret = __smp_prepare_cpu(cpu);

        if (ret)
                return -EIO;
#endif

        /* In case one didn't come up */
        if (!cpu_isset(cpu, cpu_callin_map)) {
                printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
                return -EIO;
        }

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        /* Unleash the CPU! */
        cpu_set(cpu, smp_commenced_mask);

        /*
         * Check TSC synchronization with the AP (keep irqs disabled
         * while doing so):
         */
        local_irq_save(flags);
        check_tsc_sync_source(cpu);
        local_irq_restore(flags);

        while (!cpu_isset(cpu, cpu_online_map)) {
                cpu_relax();
                touch_nmi_watchdog();
        }

        return 0;
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
        setup_ioapic_dest();
#endif
        zap_low_mappings();
#ifndef CONFIG_HOTPLUG_CPU
        /*
         * Disable executability of the SMP trampoline:
         */
        set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
#endif
}

void __init smp_intr_init(void)
{
        /*
         * IRQ0 must be given a fixed assignment and initialized,
         * because it's used before the IO-APIC is set up.
         */
        set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

        /*
         * The reschedule interrupt is a CPU-to-CPU reschedule-helper
         * IPI, driven by wakeup.
         */
        set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

        /* IPI for invalidation */
        set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

        /* IPI for generic function call */
        set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}

/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
        extern unsigned int maxcpus;

        maxcpus = simple_strtoul(arg, NULL, 0);
        return 0;
}
early_param("maxcpus", parse_maxcpus);
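
/*
 * Illustration (added, not in the original file): maxcpus is consumed at
 * enumeration time, so it is passed on the kernel command line, e.g.
 *
 *	linux ... maxcpus=2
 *
 * which limits the boot to at most two CPUs; on a BIOS that lists all
 * physical packages before their HT siblings this effectively disables
 * hyper-threading, as the comment above notes.
 */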