/*
 * Merge branch 'linus' into x86/x2apic
 * arch/x86/kernel/apic_64.c
 */
1 /*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17 #include <linux/init.h>
18
19 #include <linux/mm.h>
20 #include <linux/delay.h>
21 #include <linux/bootmem.h>
22 #include <linux/interrupt.h>
23 #include <linux/mc146818rtc.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/sysdev.h>
26 #include <linux/ioport.h>
27 #include <linux/clockchips.h>
28 #include <linux/acpi_pmtmr.h>
29 #include <linux/module.h>
30 #include <linux/dmar.h>
31
32 #include <asm/atomic.h>
33 #include <asm/smp.h>
34 #include <asm/mtrr.h>
35 #include <asm/mpspec.h>
36 #include <asm/hpet.h>
37 #include <asm/pgalloc.h>
38 #include <asm/nmi.h>
39 #include <asm/idle.h>
40 #include <asm/proto.h>
41 #include <asm/timex.h>
42 #include <asm/apic.h>
43 #include <asm/i8259.h>
44
45 #include <mach_ipi.h>
46 #include <mach_apic.h>
47
/* Set by the "noapictimer" command-line option: skip LAPIC timer setup. */
static int disable_apic_timer __cpuinitdata;
/* Set by "apicpmtimer": calibrate the APIC timer against the ACPI PM timer. */
static int apic_calibrate_pmtmr __initdata;
int disable_apic;
int disable_x2apic;
/* Non-zero once the CPU operates in x2apic (MSR-based) mode. */
int x2apic;

/* x2apic enabled before OS handover */
int x2apic_preenabled;

/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);

/*
 * Debug level, exported for io_apic.c
 */
unsigned int apic_verbosity;

/* Have we found an MP table */
int smp_found_config;

static struct resource lapic_resource = {
	.name = "Local APIC",
	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

/* APIC timer ticks per jiffy, filled in by calibrate_APIC_clock(). */
static unsigned int calibration_result;

static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt);
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt);
static void lapic_timer_broadcast(cpumask_t mask);
static void apic_pm_activate(void);

/*
 * Clock event device template for the local APIC timer. It starts out
 * with CLOCK_EVT_FEAT_DUMMY set; the flag is cleared in
 * setup_boot_APIC_clock() once calibration succeeds.
 */
static struct clock_event_device lapic_clockevent = {
	.name = "lapic",
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
	| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
	.shift = 32,
	.set_mode = lapic_timer_setup,
	.set_next_event = lapic_next_event,
	.broadcast = lapic_timer_broadcast,
	.rating = 100,
	.irq = -1,
};
/* Per-CPU copy of the template, registered in setup_APIC_timer(). */
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);

/* Physical address the LAPIC registers are mapped at (0 = not mapped yet). */
static unsigned long apic_phys;

unsigned long mp_lapic_addr;
100 /*
101 * Get the LAPIC version
102 */
static inline int lapic_get_version(void)
{
	/* Extract the version field from the LAPIC version register. */
	return GET_APIC_VERSION(apic_read(APIC_LVR));
}
107
108 /*
109 * Check, if the APIC is integrated or a seperate chip
110 */
static inline int lapic_is_integrated(void)
{
	return 1;	/* unconditionally integrated on this (64-bit) build */
}
115
116 /*
117 * Check, whether this is a modern or a first generation APIC
118 */
119 static int modern_apic(void)
120 {
121 /* AMD systems use old APIC versions, so check the CPU */
122 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
123 boot_cpu_data.x86 >= 0xf)
124 return 1;
125 return lapic_get_version() >= 0x14;
126 }
127
void xapic_wait_icr_idle(void)
{
	/* Spin until the ICR delivery-status (busy) bit clears. */
	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}
133
134 u32 safe_xapic_wait_icr_idle(void)
135 {
136 u32 send_status;
137 int timeout;
138
139 timeout = 0;
140 do {
141 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
142 if (!send_status)
143 break;
144 udelay(100);
145 } while (timeout++ < 1000);
146
147 return send_status;
148 }
149
void xapic_icr_write(u32 low, u32 id)
{
	/*
	 * Program the destination field (ICR2) first; the write to the
	 * low word (APIC_ICR) is what actually dispatches the IPI.
	 */
	apic_write(APIC_ICR2, id << 24);
	apic_write(APIC_ICR, low);
}
155
156 u64 xapic_icr_read(void)
157 {
158 u32 icr1, icr2;
159
160 icr2 = apic_read(APIC_ICR2);
161 icr1 = apic_read(APIC_ICR);
162
163 return (icr1 | ((u64)icr2 << 32));
164 }
165
/* Register accessors for classic (MMIO-based) xAPIC mode. */
static struct apic_ops xapic_ops = {
	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
	.icr_read = xapic_icr_read,
	.icr_write = xapic_icr_write,
	.wait_icr_idle = xapic_wait_icr_idle,
	.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
};

/* Defaults to xAPIC ops; replaced with x2apic_ops when x2apic is enabled. */
struct apic_ops __read_mostly *apic_ops = &xapic_ops;

EXPORT_SYMBOL_GPL(apic_ops);
178
static void x2apic_wait_icr_idle(void)
{
	/* no need to wait for icr idle in x2apic */
	return;
}
184
static u32 safe_x2apic_wait_icr_idle(void)
{
	/* no need to wait for icr idle in x2apic; always report "idle" */
	return 0;
}
190
void x2apic_icr_write(u32 low, u32 id)
{
	/* In x2apic mode the ICR is one 64-bit MSR: destination id in the high half. */
	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}
195
u64 x2apic_icr_read(void)
{
	unsigned long val;

	/* A single MSR read returns the full 64-bit ICR in x2apic mode. */
	rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
	return val;
}
203
/* Register accessors for x2apic (MSR-based) mode. */
static struct apic_ops x2apic_ops = {
	.read = native_apic_msr_read,
	.write = native_apic_msr_write,
	.icr_read = x2apic_icr_read,
	.icr_write = x2apic_icr_write,
	.wait_icr_idle = x2apic_wait_icr_idle,
	.safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
};
212
/**
 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
 */
void __cpuinit enable_NMI_through_LVT0(void)
{
	unsigned int v;

	/* unmask and set to NMI */
	v = APIC_DM_NMI;	/* bare delivery mode: mask bit and vector stay zero */
	apic_write(APIC_LVT0, v);
}
224
/**
 * lapic_get_maxlvt - get the maximum number of local vector table entries
 */
int lapic_get_maxlvt(void)
{
	unsigned int v, maxlvt;

	/* The max LVT index is encoded in the version register. */
	v = apic_read(APIC_LVR);
	maxlvt = GET_APIC_MAXLVT(v);
	return maxlvt;
}
236
237 /*
238 * This function sets up the local APIC timer, with a timeout of
239 * 'clocks' APIC bus clock. During calibration we actually call
240 * this function twice on the boot CPU, once with a bogus timeout
241 * value, second time for real. The other (noncalibrating) CPUs
242 * call this function only once, with the real, calibrated value.
243 *
244 * We do reads before writes even if unnecessary, to get around the
245 * P5 APIC double write bug.
246 */
247
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
	unsigned int lvtt_value, tmp_value;

	/* Build the timer LVT entry: vector, periodic bit, mask when !irqen. */
	lvtt_value = LOCAL_TIMER_VECTOR;
	if (!oneshot)
		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
	if (!irqen)
		lvtt_value |= APIC_LVT_MASKED;

	apic_write(APIC_LVTT, lvtt_value);

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write(APIC_TDCR, (tmp_value
				& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
				| APIC_TDR_DIV_16);

	/* In periodic mode, load the initial count now to start the timer. */
	if (!oneshot)
		apic_write(APIC_TMICT, clocks);
}
271
272 /*
273 * Setup extended LVT, AMD specific (K8, family 10h)
274 *
275 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
276 * MCE interrupts are supported. Thus MCE offset must be set to 0.
277 */
278
279 #define APIC_EILVT_LVTOFF_MCE 0
280 #define APIC_EILVT_LVTOFF_IBS 1
281
static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
{
	/* Extended LVT registers are spaced 0x10 apart from APIC_EILVT0. */
	unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
	/* Entry layout: mask bit 16, message type bits 8-15, vector bits 0-7. */
	unsigned int v = (mask << 16) | (msg_type << 8) | vector;

	apic_write(reg, v);
}
289
/* Program the AMD extended-LVT entry reserved for MCE interrupts (offset 0). */
u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
{
	setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
	return APIC_EILVT_LVTOFF_MCE;
}
295
/* Program the AMD extended-LVT entry reserved for IBS interrupts (offset 1). */
u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
{
	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
	return APIC_EILVT_LVTOFF_IBS;
}
301
302 /*
303 * Program the next event, relative to now
304 */
static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt)
{
	/* Writing the initial-count register arms the countdown. */
	apic_write(APIC_TMICT, delta);
	return 0;
}
311
312 /*
313 * Setup the lapic timer in periodic or oneshot mode
314 */
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int v;

	/* Lapic used as dummy for broadcast ? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return;

	local_irq_save(flags);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		/* (Re)program the timer with the calibrated tick count. */
		__setup_APIC_LVTT(calibration_result,
				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* Mask the timer LVT entry to stop further interrupts. */
		v = apic_read(APIC_LVTT);
		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, v);
		break;
	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here */
		break;
	}

	local_irq_restore(flags);
}
346
347 /*
348 * Local APIC timer broadcast function
349 */
static void lapic_timer_broadcast(cpumask_t mask)
{
#ifdef CONFIG_SMP
	/* Deliver the timer tick to all CPUs in @mask via an IPI. */
	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}
356
357 /*
358 * Setup the local APIC timer for this CPU. Copy the initilized values
359 * of the boot CPU and register the clock event in the framework.
360 */
static void setup_APIC_timer(void)
{
	struct clock_event_device *levt = &__get_cpu_var(lapic_events);

	/* Clone the (calibrated) boot template and bind it to this CPU. */
	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of_cpu(smp_processor_id());

	clockevents_register_device(levt);
}
370
371 /*
372 * In this function we calibrate APIC bus clocks to the external
373 * timer. Unfortunately we cannot use jiffies and the timer irq
374 * to calibrate, since some later bootup code depends on getting
375 * the first irq? Ugh.
376 *
377 * We want to do the calibration only once since we
378 * want to have local timer irqs syncron. CPUs connected
379 * by the same APIC bus have the very same bus frequency.
380 * And we want to have irqs off anyways, no accidental
381 * APIC irq that way.
382 */
383
384 #define TICK_COUNT 100000000
385
static int __init calibrate_APIC_clock(void)
{
	unsigned apic, apic_start;
	unsigned long tsc, tsc_start;
	int result;	/* APIC timer frequency in ticks per second */

	local_irq_disable();

	/*
	 * Put whatever arbitrary (but long enough) timeout
	 * value into the APIC clock, we just want to get the
	 * counter running for calibration.
	 *
	 * No interrupt enable !
	 */
	__setup_APIC_LVTT(250000000, 0, 0);

	apic_start = apic_read(APIC_TMCCT);
#ifdef CONFIG_X86_PM_TIMER
	/* Optionally calibrate against the ACPI PM timer instead of the TSC. */
	if (apic_calibrate_pmtmr && pmtmr_ioport) {
		pmtimer_wait(5000); /* 5ms wait */
		apic = apic_read(APIC_TMCCT);
		result = (apic_start - apic) * 1000L / 5;
	} else
#endif
	{
		rdtscll(tsc_start);

		/* Let the APIC current-count and the TSC run concurrently. */
		do {
			apic = apic_read(APIC_TMCCT);
			rdtscll(tsc);
		} while ((tsc - tsc_start) < TICK_COUNT &&
				(apic_start - apic) < TICK_COUNT);

		/* Scale the count-down delta by the TSC rate -> ticks/sec. */
		result = (apic_start - apic) * 1000L * tsc_khz /
					(tsc - tsc_start);
	}

	local_irq_enable();

	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);

	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
		result / 1000 / 1000, result / 1000 % 1000);

	/* Calculate the scaled math multiplication factor */
	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
				lapic_clockevent.shift);
	lapic_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
	lapic_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &lapic_clockevent);

	/* Remember the per-jiffy tick count for lapic_timer_setup(). */
	calibration_result = result / HZ;

	/*
	 * Do a sanity check on the APIC calibration result
	 */
	if (calibration_result < (1000000 / HZ)) {
		printk(KERN_WARNING
			"APIC frequency too slow, disabling apic timer\n");
		return -1;
	}

	return 0;
}
452
453 /*
454 * Setup the boot APIC
455 *
456 * Calibrate and verify the result.
457 */
void __init setup_boot_APIC_clock(void)
{
	/*
	 * The local apic timer can be disabled via the kernel commandline.
	 * Register the lapic timer as a dummy clock event source on SMP
	 * systems, so the broadcast mechanism is used. On UP systems simply
	 * ignore it.
	 */
	if (disable_apic_timer) {
		printk(KERN_INFO "Disabling APIC timer\n");
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1) {
			lapic_clockevent.mult = 1;
			setup_APIC_timer();
		}
		return;
	}

	printk(KERN_INFO "Using local APIC timer interrupts.\n");
	/* Non-zero return means calibration failed; fall back to dummy. */
	if (calibrate_APIC_clock()) {
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1)
			setup_APIC_timer();
		return;
	}

	/*
	 * If nmi_watchdog is set to IO_APIC, we need the
	 * PIT/HPET going.  Otherwise register lapic as a dummy
	 * device.
	 */
	if (nmi_watchdog != NMI_IO_APIC)
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
	else
		printk(KERN_WARNING "APIC timer registered as dummy,"
			" due to nmi_watchdog=%d!\n", nmi_watchdog);

	setup_APIC_timer();
}
497
void __cpuinit setup_secondary_APIC_clock(void)
{
	/* Secondary CPUs reuse the calibration done on the boot CPU. */
	setup_APIC_timer();
}
502
503 /*
504 * The guts of the apic timer interrupt
505 */
static void local_apic_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	/*
	 * Normally we should not be here till LAPIC has been initialized but
	 * in some cases like kdump, its possible that there is a pending LAPIC
	 * timer interrupt from previous kernel's context and is delivered in
	 * new kernel the moment interrupts are enabled.
	 *
	 * Interrupts are enabled early and LAPIC is setup much later, hence
	 * its possible that when we get here evt->event_handler is NULL.
	 * Check for event_handler being NULL and discard the interrupt as
	 * spurious.
	 */
	if (!evt->event_handler) {
		printk(KERN_WARNING
		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
		/* Switch it off */
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
		return;
	}

	/*
	 * the NMI deadlock-detector uses this.
	 */
	add_pda(apic_timer_irqs, 1);

	/* Hand off to the clockevents layer (tick handling etc.). */
	evt->event_handler(evt);
}
537
538 /*
539 * Local APIC timer interrupt. This is the most natural way for doing
540 * local interrupts, but local timer interrupts can be emulated by
541 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
542 *
543 * [ if a single-CPU system runs an SMP kernel then we call the local
544 * interrupt as well. Thus we cannot inline the local irq ... ]
545 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	/* Save/restore the irq register context around the handler. */
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	exit_idle();
	irq_enter();
	local_apic_timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}
566
/* Changing the profiling timer frequency is not supported here. */
int setup_profiling_timer(unsigned int multiplier)
{
	(void)multiplier;
	return -EINVAL;
}
571
572
573 /*
574 * Local APIC start and shutdown
575 */
576
577 /**
578 * clear_local_APIC - shutdown the local APIC
579 *
580 * This is called, when a CPU is disabled and before rebooting, so the state of
581 * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
582 * leftovers during boot.
583 */
void clear_local_APIC(void)
{
	int maxlvt;
	u32 v;

	/* APIC hasn't been mapped yet */
	if (!apic_phys)
		return;

	maxlvt = lapic_get_maxlvt();
	/*
	 * Masking an LVT entry can trigger a local APIC error
	 * if the vector is zero. Mask LVTERR first to prevent this.
	 */
	if (maxlvt >= 3) {
		v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
		apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	}
	/*
	 * Careful: we have to set masks only first to deassert
	 * any level-triggered sources.
	 */
	v = apic_read(APIC_LVTT);
	apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT1);
	apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
	if (maxlvt >= 4) {
		v = apic_read(APIC_LVTPC);
		apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
	}

	/*
	 * Clean APIC state for other OSs:
	 */
	apic_write(APIC_LVTT, APIC_LVT_MASKED);
	apic_write(APIC_LVT0, APIC_LVT_MASKED);
	apic_write(APIC_LVT1, APIC_LVT_MASKED);
	if (maxlvt >= 3)
		apic_write(APIC_LVTERR, APIC_LVT_MASKED);
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, APIC_LVT_MASKED);
	/* Clear the error status; the read flushes the write. */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
}
630
631 /**
632 * disable_local_APIC - clear and disable the local APIC
633 */
void disable_local_APIC(void)
{
	unsigned int value;

	clear_local_APIC();

	/*
	 * Disable APIC (implies clearing of registers
	 * for 82489DX!).
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_SPIV_APIC_ENABLED;
	apic_write(APIC_SPIV, value);
}
648
/* Disable the local APIC with interrupts off; no-op without an APIC. */
void lapic_shutdown(void)
{
	unsigned long flags;

	if (!cpu_has_apic)
		return;

	local_irq_save(flags);

	disable_local_APIC();

	local_irq_restore(flags);
}
662
663 /*
664 * This is to verify that we're looking at a real local APIC.
665 * Check these against your board if the CPUs aren't getting
666 * started for no apparent reason.
667 */
/* Returns 1 when the registers behave like a genuine local APIC, else 0. */
int __init verify_local_APIC(void)
{
	unsigned int reg0, reg1;

	/*
	 * The version register is read-only in a real APIC.
	 */
	reg0 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
	apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
	reg1 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);

	/*
	 * The two version reads above should print the same
	 * numbers.  If the second one is different, then we
	 * poke at a non-APIC.
	 */
	if (reg1 != reg0)
		return 0;

	/*
	 * Check if the version looks reasonably.
	 */
	reg1 = GET_APIC_VERSION(reg0);
	if (reg1 == 0x00 || reg1 == 0xff)
		return 0;
	reg1 = lapic_get_maxlvt();
	if (reg1 < 0x02 || reg1 == 0xff)
		return 0;

	/*
	 * The ID register is read/write in a real APIC.
	 */
	reg0 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
	/* Flip the ID bits, verify the flip sticks, then restore the ID. */
	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
	reg1 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
	apic_write(APIC_ID, reg0);
	if (reg1 != (reg0 ^ APIC_ID_MASK))
		return 0;

	/*
	 * The next two are just to see if we have sane values.
	 * They're only really relevant if we're in Virtual Wire
	 * compatibility mode, but most boxes are anymore.
	 */
	reg0 = apic_read(APIC_LVT0);
	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
	reg1 = apic_read(APIC_LVT1);
	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);

	return 1;
}
723
724 /**
725 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
726 */
void __init sync_Arb_IDs(void)
{
	/* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
	if (modern_apic())
		return;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
	/* Level-triggered INIT to all-including-self resyncs arbitration IDs. */
	apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
				| APIC_DM_INIT);
}
742
743 /*
744 * An initial setup of the virtual wire mode.
745 */
void __init init_bsp_APIC(void)
{
	unsigned int value;

	/*
	 * Don't do the setup now if we have a SMP BIOS as the
	 * through-I/O-APIC virtual wire mode might be active.
	 */
	if (smp_found_config || !cpu_has_apic)
		return;

	value = apic_read(APIC_LVR);

	/*
	 * Do not trust the local APIC being empty at bootup.
	 */
	clear_local_APIC();

	/*
	 * Enable APIC.
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= APIC_SPIV_FOCUS_DISABLED;
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up the virtual wire mode: LINT0 = ExtINT, LINT1 = NMI.
	 */
	apic_write(APIC_LVT0, APIC_DM_EXTINT);
	value = APIC_DM_NMI;
	apic_write(APIC_LVT1, value);
}
781
782 /**
783 * setup_local_APIC - setup the local APIC
784 */
void __cpuinit setup_local_APIC(void)
{
	unsigned int value;
	int i, j;

	preempt_disable();
	value = apic_read(APIC_LVR);

	BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);

	/*
	 * Double-check whether this APIC is really registered.
	 * This is meaningless in clustered apic mode, so we skip it.
	 */
	if (!apic_id_registered())
		BUG();

	/*
	 * Intel recommends to set DFR, LDR and TPR before enabling
	 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
	 * document number 292116).  So here it goes...
	 */
	init_apic_ldr();

	/*
	 * Set Task Priority to 'accept all'. We never change this
	 * later on.
	 */
	value = apic_read(APIC_TASKPRI);
	value &= ~APIC_TPRI_MASK;
	apic_write(APIC_TASKPRI, value);

	/*
	 * After a crash, we no longer service the interrupts and a pending
	 * interrupt from previous kernel might still have ISR bit set.
	 *
	 * Most probably by now CPU has serviced that pending interrupt and
	 * it might not have done the ack_APIC_irq() because it thought,
	 * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
	 * does not clear the ISR bit and cpu thinks it has already serivced
	 * the interrupt. Hence a vector might get locked. It was noticed
	 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
	 */
	for (i = APIC_ISR_NR - 1; i >= 0; i--) {
		value = apic_read(APIC_ISR + i*0x10);
		for (j = 31; j >= 0; j--) {
			if (value & (1<<j))
				ack_APIC_irq();
		}
	}

	/*
	 * Now that we are all set up, enable the APIC
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	/*
	 * Enable APIC
	 */
	value |= APIC_SPIV_APIC_ENABLED;

	/* We always use processor focus */

	/*
	 * Set spurious IRQ vector
	 */
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up LVT0, LVT1:
	 *
	 * set up through-local-APIC on the BP's LINT0. This is not
	 * strictly necessary in pure symmetric-IO mode, but sometimes
	 * we delegate interrupts to the 8259A.
	 */
	/*
	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
	 */
	/* Keep only the mask bit: was LINT0 left masked by firmware? */
	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
	if (!smp_processor_id() && !value) {
		value = APIC_DM_EXTINT;
		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
			    smp_processor_id());
	} else {
		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
			    smp_processor_id());
	}
	apic_write(APIC_LVT0, value);

	/*
	 * only the BP should see the LINT1 NMI signal, obviously.
	 */
	if (!smp_processor_id())
		value = APIC_DM_NMI;
	else
		value = APIC_DM_NMI | APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
	preempt_enable();
}
886
/* Point the error LVT at our error vector and reset any stale ESR state. */
static void __cpuinit lapic_setup_esr(void)
{
	unsigned maxlvt = lapic_get_maxlvt();

	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR);
	/*
	 * spec says clear errors after enabling vector.
	 */
	if (maxlvt > 3)
		apic_write(APIC_ESR, 0);
}
898
/* Finish per-CPU APIC bringup: ESR, NMI watchdog, power-management hook. */
void __cpuinit end_local_APIC_setup(void)
{
	lapic_setup_esr();
	setup_apic_nmi_watchdog(NULL);
	apic_pm_activate();
}
905
906 void check_x2apic(void)
907 {
908 int msr, msr2;
909
910 rdmsr(MSR_IA32_APICBASE, msr, msr2);
911
912 if (msr & X2APIC_ENABLE) {
913 printk("x2apic enabled by BIOS, switching to x2apic ops\n");
914 x2apic_preenabled = x2apic = 1;
915 apic_ops = &x2apic_ops;
916 }
917 }
918
919 void enable_x2apic(void)
920 {
921 int msr, msr2;
922
923 rdmsr(MSR_IA32_APICBASE, msr, msr2);
924 if (!(msr & X2APIC_ENABLE)) {
925 printk("Enabling x2apic\n");
926 wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
927 }
928 }
929
/*
 * Enable interrupt remapping and (when possible) x2apic mode, with
 * rollback of the IO-APIC state if interrupt-remapping setup fails.
 */
void enable_IR_x2apic(void)
{
#ifdef CONFIG_INTR_REMAP
	int ret;
	unsigned long flags;

	if (!cpu_has_x2apic)
		return;

	/* "nox2apic" on the command line sets disable_x2apic. */
	if (!x2apic_preenabled && disable_x2apic) {
		printk(KERN_INFO
		       "Skipped enabling x2apic and Interrupt-remapping "
		       "because of nox2apic\n");
		return;
	}

	if (x2apic_preenabled && disable_x2apic)
		panic("Bios already enabled x2apic, can't enforce nox2apic");

	if (!x2apic_preenabled && skip_ioapic_setup) {
		printk(KERN_INFO
		       "Skipped enabling x2apic and Interrupt-remapping "
		       "because of skipping io-apic setup\n");
		return;
	}

	/* Interrupt remapping requires the DMAR ACPI tables. */
	ret = dmar_table_init();
	if (ret) {
		printk(KERN_INFO
		       "dmar_table_init() failed with %d:\n", ret);

		if (x2apic_preenabled)
			panic("x2apic enabled by bios. But IR enabling failed");
		else
			printk(KERN_INFO
			       "Not enabling x2apic,Intr-remapping\n");
		return;
	}

	/*
	 * Quiesce interrupt sources while the IO-APIC entries are saved
	 * and interrupt remapping is switched on.
	 */
	local_irq_save(flags);
	mask_8259A();
	save_mask_IO_APIC_setup();

	ret = enable_intr_remapping(1);

	if (ret && x2apic_preenabled) {
		local_irq_restore(flags);
		panic("x2apic enabled by bios. But IR enabling failed");
	}

	if (ret)
		goto end;

	if (!x2apic) {
		x2apic = 1;
		apic_ops = &x2apic_ops;
		enable_x2apic();
	}
end:
	if (ret)
		/*
		 * IR enabling failed
		 */
		restore_IO_APIC_setup();
	else
		reinit_intr_remapped_IO_APIC(x2apic_preenabled);

	unmask_8259A();
	local_irq_restore(flags);

	if (!ret) {
		if (!x2apic_preenabled)
			printk(KERN_INFO
			       "Enabled x2apic and interrupt-remapping\n");
		else
			printk(KERN_INFO
			       "Enabled Interrupt-remapping\n");
	} else
		printk(KERN_ERR
		       "Failed to enable Interrupt-remapping and x2apic\n");
#else
	if (!cpu_has_x2apic)
		return;

	/* Without CONFIG_INTR_REMAP we cannot run a BIOS-pre-enabled x2apic. */
	if (x2apic_preenabled)
		panic("x2apic enabled prior OS handover,"
		      " enable CONFIG_INTR_REMAP");

	printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
	       " and x2apic\n");
#endif

	return;
}
1024
1025 /*
1026 * Detect and enable local APICs on non-SMP boards.
1027 * Original code written by Keir Fraser.
1028 * On AMD64 we trust the BIOS - if it says no APIC it is likely
1029 * not correctly set up (usually the APIC timer won't work etc.)
1030 */
/* Returns 0 and records the default LAPIC base when an APIC is present. */
static int __init detect_init_APIC(void)
{
	if (!cpu_has_apic) {
		printk(KERN_INFO "No local APIC present\n");
		return -1;
	}

	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
	boot_cpu_physical_apicid = 0;
	return 0;
}
1042
void __init early_init_lapic_mapping(void)
{
	unsigned long phys_addr;

	/*
	 * If no local APIC can be found then go out
	 * : it means there is no mpatable and MADT
	 */
	if (!smp_found_config)
		return;

	phys_addr = mp_lapic_addr;

	/* Map the LAPIC registers at the fixmap virtual address. */
	set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
		    APIC_BASE, phys_addr);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	boot_cpu_physical_apicid = read_apic_id();
}
1066
1067 /**
1068 * init_apic_mappings - initialize APIC mappings
1069 */
void __init init_apic_mappings(void)
{
	/* In x2apic mode registers are MSRs — no MMIO mapping needed. */
	if (x2apic) {
		boot_cpu_physical_apicid = read_apic_id();
		return;
	}

	/*
	 * If no local APIC can be found then set up a fake all
	 * zeroes page to simulate the local APIC and another
	 * one for the IO-APIC.
	 */
	if (!smp_found_config && detect_init_APIC()) {
		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
		apic_phys = __pa(apic_phys);
	} else
		apic_phys = mp_lapic_addr;

	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
				APIC_BASE, apic_phys);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	boot_cpu_physical_apicid = read_apic_id();
}
1098
1099 /*
1100 * This initializes the IO-APIC and APIC hardware if this is
1101 * a UP kernel.
1102 */
int __init APIC_init_uniprocessor(void)
{
	if (disable_apic) {
		printk(KERN_INFO "Apic disabled\n");
		return -1;
	}
	if (!cpu_has_apic) {
		disable_apic = 1;
		printk(KERN_INFO "Apic disabled by BIOS\n");
		return -1;
	}

	enable_IR_x2apic();
	setup_apic_routing();

	verify_local_APIC();

	connect_bsp_APIC();

	/* Only the BSP exists on UP: mark it present and set its APIC ID. */
	physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));

	setup_local_APIC();

	/*
	 * Now enable IO-APICs, actually call clear_IO_APIC
	 * We need clear_IO_APIC before enabling vector on BP
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();

	if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
		localise_nmi_watchdog();
	end_local_APIC_setup();

	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;
	setup_boot_APIC_clock();
	check_nmi_watchdog();
	return 0;
}
1146
1147 /*
1148 * Local APIC interrupts
1149 */
1150
1151 /*
1152 * This interrupt should _never_ happen with our APIC/SMP architecture
1153 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned int v;
	exit_idle();
	irq_enter();
	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one.  Just in case...
	 * Spurious interrupts should not be ACKed.
	 */
	/* Read the ISR word containing our vector and test its bit. */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	add_pda(irq_spurious_count, 1);
	irq_exit();
}
1171
1172 /*
1173 * This interrupt should never happen with our APIC/SMP architecture
1174 */
asmlinkage void smp_error_interrupt(void)
{
	unsigned int v, v1;

	exit_idle();
	irq_enter();
	/* First tickle the hardware, only then report what went on. -- REW */
	v = apic_read(APIC_ESR);
	/* Writing ESR latches the current errors; the re-read fetches them. */
	apic_write(APIC_ESR, 0);
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();
	atomic_inc(&irq_err_count);

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	*/
	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
		smp_processor_id(), v , v1);
	irq_exit();
}
1202
/**
 * connect_bsp_APIC - attach the APIC to the interrupt system
 */
void __init connect_bsp_APIC(void)
{
	enable_apic_mode();
}
1210
void disconnect_bsp_APIC(int virt_wire_setup)
{
	/* Go back to Virtual Wire compatibility mode */
	unsigned long value;

	/* For the spurious interrupt use vector F, and enable it */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= 0xf;
	apic_write(APIC_SPIV, value);

	if (!virt_wire_setup) {
		/*
		 * For LVT0 make it edge triggered, active high,
		 * external and enabled
		 */
		value = apic_read(APIC_LVT0);
		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
		apic_write(APIC_LVT0, value);
	} else {
		/* Disable LVT0 */
		apic_write(APIC_LVT0, APIC_LVT_MASKED);
	}

	/* For LVT1 make it edge triggered, active high, nmi and enabled */
	value = apic_read(APIC_LVT1);
	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
	apic_write(APIC_LVT1, value);
}
1249
/*
 * generic_processor_info - register a processor found in the MP/ACPI tables
 * @apicid:	physical (BIOS-reported) APIC id of the processor
 * @version:	APIC version (currently unused in this function)
 *
 * Assigns the next free logical cpu number, records the cpu<->apicid
 * mappings and marks the cpu possible and present.  Processors beyond
 * NR_CPUS are ignored with a warning.
 */
void __cpuinit generic_processor_info(int apicid, int version)
{
	int cpu;
	cpumask_t tmp_map;

	if (num_processors >= NR_CPUS) {
		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
		       " Processor ignored.\n", NR_CPUS);
		return;
	}

	num_processors++;
	/* first cpu number not yet marked present becomes this cpu's id */
	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);

	physid_set(apicid, phys_cpu_present_map);
	if (apicid == boot_cpu_physical_apicid) {
		/*
		 * x86_bios_cpu_apicid is required to have processors listed
		 * in same order as logical cpu numbers. Hence the first
		 * entry is BSP, and so on.
		 */
		cpu = 0;
	}
	if (apicid > max_physical_apicid)
		max_physical_apicid = apicid;

	/* are we being called early in kernel startup? */
	if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
		/* per-cpu areas not set up yet: write the early static maps */
		u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
		u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);

		cpu_to_apicid[cpu] = apicid;
		bios_cpu_apicid[cpu] = apicid;
	} else {
		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
		per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
	}

	cpu_set(cpu, cpu_possible_map);
	cpu_set(cpu, cpu_present_map);
}
1292
/* Physical APIC id of the CPU we are currently running on. */
int hard_smp_processor_id(void)
{
	return read_apic_id();
}
1297
1298 /*
1299 * Power management
1300 */
1301 #ifdef CONFIG_PM
1302
/* Snapshot of the local APIC registers taken at suspend time. */
static struct {
	/* 'active' is true if the local APIC was enabled by us and
	   not the BIOS; this signifies that we are also responsible
	   for disabling it before entering apm/acpi suspend */
	int active;
	/* r/w apic fields */
	unsigned int apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
	unsigned int apic_spiv;
	unsigned int apic_lvtt;
	unsigned int apic_lvtpc;	/* saved only when maxlvt >= 4 */
	unsigned int apic_lvt0;
	unsigned int apic_lvt1;
	unsigned int apic_lvterr;
	unsigned int apic_tmict;
	unsigned int apic_tdcr;
	unsigned int apic_thmr;		/* thermal LVT; maxlvt >= 5, Intel MCE only */
} apic_pm_state;
1323
/*
 * Save the local APIC state into apic_pm_state and disable the APIC,
 * to be replayed by lapic_resume().  No-op if we did not enable the
 * APIC ourselves (apic_pm_state.active == 0).
 */
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	if (maxlvt >= 4)
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif
	/* disable with interrupts off so nothing races the shutdown */
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
	return 0;
}
1356
/*
 * Restore the local APIC state saved by lapic_suspend().
 *
 * The APIC is first re-enabled at the hardware level (APICBASE MSR,
 * or x2apic mode), then the saved registers are replayed.  LVTERR is
 * masked for the duration of the restore; the ESR write/read pairs
 * clear any error state accumulated in the meantime.
 */
static int lapic_resume(struct sys_device *dev)
{
	unsigned int l, h;
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	local_irq_save(flags);
	if (!x2apic) {
		/* re-enable the APIC at its original physical base */
		rdmsr(MSR_IA32_APICBASE, l, h);
		l &= ~MSR_IA32_APICBASE_BASE;
		l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
		wrmsr(MSR_IA32_APICBASE, l, h);
	} else
		enable_x2apic();

	/* keep error interrupts masked while registers are restored */
	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 5)
		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	local_irq_restore(flags);
	return 0;
}
1402
/* Hook the local APIC into the sysdev suspend/resume machinery. */
static struct sysdev_class lapic_sysclass = {
	.name = "lapic",
	.resume = lapic_resume,
	.suspend = lapic_suspend,
};

/* The single system device representing this CPU's local APIC. */
static struct sys_device device_lapic = {
	.id = 0,
	.cls = &lapic_sysclass,
};
1413
/* Record that we (not the BIOS) enabled the APIC, so PM must manage it. */
static void __cpuinit apic_pm_activate(void)
{
	apic_pm_state.active = 1;
}
1418
1419 static int __init init_lapic_sysfs(void)
1420 {
1421 int error;
1422
1423 if (!cpu_has_apic)
1424 return 0;
1425 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
1426
1427 error = sysdev_class_register(&lapic_sysclass);
1428 if (!error)
1429 error = sysdev_register(&device_lapic);
1430 return error;
1431 }
1432 device_initcall(init_lapic_sysfs);
1433
1434 #else /* CONFIG_PM */
1435
static void apic_pm_activate(void) { }	/* no CONFIG_PM: nothing to track */
1437
1438 #endif /* CONFIG_PM */
1439
/*
 * apic_is_clustered_box() -- Check if we can expect good TSC
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 *
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis. Use available data to take a good guess.
 * If in doubt, go HPET.
 *
 * Returns non-zero when the APIC id layout suggests a clustered
 * (multi-chassis or vSMP) box whose TSCs may not be synchronized.
 */
__cpuinit int apic_is_clustered_box(void)
{
	int i, clusters, zeros;
	unsigned id;
	u16 *bios_cpu_apicid;
	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

	/*
	 * there is not this kind of box with AMD CPU yet.
	 * Some AMD box with quadcore cpu and 8 sockets apicid
	 * will be [4, 0x23] or [8, 0x27] could be thought to
	 * vsmp box still need checking...
	 */
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
		return 0;

	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

	/* mark every APIC cluster that has at least one known CPU */
	for (i = 0; i < NR_CPUS; i++) {
		/* are we being called early in kernel startup? */
		if (bios_cpu_apicid) {
			id = bios_cpu_apicid[i];
		}
		else if (i < nr_cpu_ids) {
			if (cpu_present(i))
				id = per_cpu(x86_bios_cpu_apicid, i);
			else
				continue;
		}
		else
			break;

		if (id != BAD_APICID)
			__set_bit(APIC_CLUSTERID(id), clustermap);
	}

	/* Problem:  Partially populated chassis may not have CPUs in some of
	 * the APIC clusters they have been allocated.  Only present CPUs have
	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
	 * Since clusters are allocated sequentially, count zeros only if
	 * they are bounded by ones.
	 */
	clusters = 0;
	zeros = 0;
	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (test_bit(i, clustermap)) {
			clusters += 1 + zeros;
			zeros = 0;
		} else
			++zeros;
	}

	/* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
	 * not guaranteed to be synced between boards
	 */
	if (is_vsmp_box() && clusters > 1)
		return 1;

	/*
	 * If clusters > 2, then should be multi-chassis.
	 * May have to revisit this when multi-core + hyperthreaded CPUs come
	 * out, but AFAIK this will work even for them.
	 */
	return (clusters > 2);
}
1515
/* "nox2apic": never switch to x2apic mode, even if the CPU supports it. */
static __init int setup_nox2apic(char *str)
{
	disable_x2apic = 1;
	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_X2APIC);
	return 0;
}
early_param("nox2apic", setup_nox2apic);
1523
1524
1525 /*
1526 * APIC command line parameters
1527 */
1528 static int __init apic_set_verbosity(char *str)
1529 {
1530 if (str == NULL) {
1531 skip_ioapic_setup = 0;
1532 ioapic_force = 1;
1533 return 0;
1534 }
1535 if (strcmp("debug", str) == 0)
1536 apic_verbosity = APIC_DEBUG;
1537 else if (strcmp("verbose", str) == 0)
1538 apic_verbosity = APIC_VERBOSE;
1539 else {
1540 printk(KERN_WARNING "APIC Verbosity level %s not recognised"
1541 " use apic=verbose or apic=debug\n", str);
1542 return -EINVAL;
1543 }
1544
1545 return 0;
1546 }
1547 early_param("apic", apic_set_verbosity);
1548
/* "disableapic": run with the local APIC completely disabled. */
static __init int setup_disableapic(char *str)
{
	disable_apic = 1;
	setup_clear_cpu_cap(X86_FEATURE_APIC);
	return 0;
}
early_param("disableapic", setup_disableapic);
1556
/* same as disableapic, for compatibility */
static __init int setup_nolapic(char *str)
{
	return setup_disableapic(str);
}
early_param("nolapic", setup_nolapic);
1563
/* "lapic_timer_c2_ok": trust the local APIC timer to keep running in C2. */
static int __init parse_lapic_timer_c2_ok(char *arg)
{
	local_apic_timer_c2_ok = 1;
	return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1570
1571 static __init int setup_noapictimer(char *str)
1572 {
1573 if (str[0] != ' ' && str[0] != 0)
1574 return 0;
1575 disable_apic_timer = 1;
1576 return 1;
1577 }
1578 __setup("noapictimer", setup_noapictimer);
1579
/*
 * "apicpmtimer": calibrate the APIC timer using the ACPI PM timer.
 * Also disables the TSC clocksource via notsc_setup().
 */
static __init int setup_apicpmtimer(char *s)
{
	apic_calibrate_pmtmr = 1;
	notsc_setup(NULL);
	return 0;
}
__setup("apicpmtimer", setup_apicpmtimer);
1587
1588 static int __init lapic_insert_resource(void)
1589 {
1590 if (!apic_phys)
1591 return -1;
1592
1593 /* Put local APIC into the resource map. */
1594 lapic_resource.start = apic_phys;
1595 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
1596 insert_resource(&iomem_resource, &lapic_resource);
1597
1598 return 0;
1599 }
1600
1601 /*
1602 * need call insert after e820_reserve_resources()
1603 * that is using request_resource
1604 */
1605 late_initcall(lapic_insert_resource);
This page took 0.061871 seconds and 6 git commands to generate.