Merge branch 'x86/cpu' into x86/core
[deliverable/linux.git] / arch / x86 / kernel / apic_64.c
1 /*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17 #include <linux/init.h>
18
19 #include <linux/mm.h>
20 #include <linux/delay.h>
21 #include <linux/bootmem.h>
22 #include <linux/interrupt.h>
23 #include <linux/mc146818rtc.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/sysdev.h>
26 #include <linux/ioport.h>
27 #include <linux/clockchips.h>
28 #include <linux/acpi_pmtmr.h>
29 #include <linux/module.h>
30 #include <linux/dmar.h>
31
32 #include <asm/atomic.h>
33 #include <asm/smp.h>
34 #include <asm/mtrr.h>
35 #include <asm/mpspec.h>
36 #include <asm/hpet.h>
37 #include <asm/pgalloc.h>
38 #include <asm/nmi.h>
39 #include <asm/idle.h>
40 #include <asm/proto.h>
41 #include <asm/timex.h>
42 #include <asm/apic.h>
43 #include <asm/i8259.h>
44
45 #include <mach_ipi.h>
46 #include <mach_apic.h>
47
/* "noapictimer" boot option */
static int disable_apic_timer __cpuinitdata;
/* "apicpmtimer" boot option: calibrate against the ACPI PM timer */
static int apic_calibrate_pmtmr __initdata;
int disable_apic;
int disable_x2apic;
int x2apic;

/* x2apic enabled before OS handover */
int x2apic_preenabled;

/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);

/*
 * Debug level, exported for io_apic.c
 */
unsigned int apic_verbosity;

/* Have we found an MP table */
int smp_found_config;

static struct resource lapic_resource = {
	.name = "Local APIC",
	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

/* APIC timer ticks per jiffy, filled in by calibrate_APIC_clock() */
static unsigned int calibration_result;

static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt);
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt);
static void lapic_timer_broadcast(cpumask_t mask);
static void apic_pm_activate(void);

/*
 * Clock event template for the local APIC timer. Starts out flagged
 * DUMMY (broadcast-only); setup_boot_APIC_clock() clears the flag once
 * calibration succeeds and the timer is usable.
 */
static struct clock_event_device lapic_clockevent = {
	.name = "lapic",
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
	| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
	.shift = 32,
	.set_mode = lapic_timer_setup,
	.set_next_event = lapic_next_event,
	.broadcast = lapic_timer_broadcast,
	.rating = 100,
	.irq = -1,
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);

/* Physical address of the mapped local APIC; zero until mapped */
static unsigned long apic_phys;

unsigned long mp_lapic_addr;
100 /*
101 * Get the LAPIC version
102 */
103 static inline int lapic_get_version(void)
104 {
105 return GET_APIC_VERSION(apic_read(APIC_LVR));
106 }
107
108 /*
 * Check if the APIC is integrated or a separate chip
110 */
static inline int lapic_is_integrated(void)
{
	return 1;	/* 64-bit CPUs always have an integrated APIC */
}
115
116 /*
117 * Check, whether this is a modern or a first generation APIC
118 */
119 static int modern_apic(void)
120 {
121 /* AMD systems use old APIC versions, so check the CPU */
122 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
123 boot_cpu_data.x86 >= 0xf)
124 return 1;
125 return lapic_get_version() >= 0x14;
126 }
127
128 void xapic_wait_icr_idle(void)
129 {
130 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
131 cpu_relax();
132 }
133
134 u32 safe_xapic_wait_icr_idle(void)
135 {
136 u32 send_status;
137 int timeout;
138
139 timeout = 0;
140 do {
141 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
142 if (!send_status)
143 break;
144 udelay(100);
145 } while (timeout++ < 1000);
146
147 return send_status;
148 }
149
/*
 * Write an ICR command in xAPIC (MMIO) mode: destination id goes into
 * ICR2 first; writing the low word last, since that initiates delivery.
 */
void xapic_icr_write(u32 low, u32 id)
{
	apic_write(APIC_ICR2, id << 24);	/* destination field is bits 56-63 */
	apic_write(APIC_ICR, low);
}
155
156 u64 xapic_icr_read(void)
157 {
158 u32 icr1, icr2;
159
160 icr2 = apic_read(APIC_ICR2);
161 icr1 = apic_read(APIC_ICR);
162
163 return (icr1 | ((u64)icr2 << 32));
164 }
165
/* MMIO-based (xAPIC) register accessors; the boot-time default. */
static struct apic_ops xapic_ops = {
	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
	.icr_read = xapic_icr_read,
	.icr_write = xapic_icr_write,
	.wait_icr_idle = xapic_wait_icr_idle,
	.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
};

/* Active accessor table; switched to &x2apic_ops when x2apic is enabled. */
struct apic_ops __read_mostly *apic_ops = &xapic_ops;

EXPORT_SYMBOL_GPL(apic_ops);
178
static void x2apic_wait_icr_idle(void)
{
	/* x2apic has no ICR busy bit, so there is nothing to wait for */
}
184
185 static u32 safe_x2apic_wait_icr_idle(void)
186 {
187 /* no need to wait for icr idle in x2apic */
188 return 0;
189 }
190
/*
 * Write the ICR in x2apic mode: one 64-bit MSR write, destination id in
 * the upper 32 bits, command in the lower 32 bits.
 */
void x2apic_icr_write(u32 low, u32 id)
{
	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}
195
196 u64 x2apic_icr_read(void)
197 {
198 unsigned long val;
199
200 rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
201 return val;
202 }
203
/* MSR-based (x2apic) register accessors; installed by check/enable paths. */
static struct apic_ops x2apic_ops = {
	.read = native_apic_msr_read,
	.write = native_apic_msr_write,
	.icr_read = x2apic_icr_read,
	.icr_write = x2apic_icr_write,
	.wait_icr_idle = x2apic_wait_icr_idle,
	.safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
};
212
213 /**
214 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
215 */
216 void __cpuinit enable_NMI_through_LVT0(void)
217 {
218 unsigned int v;
219
220 /* unmask and set to NMI */
221 v = APIC_DM_NMI;
222
223 /* Level triggered for 82489DX (32bit mode) */
224 if (!lapic_is_integrated())
225 v |= APIC_LVT_LEVEL_TRIGGER;
226
227 apic_write(APIC_LVT0, v);
228 }
229
230 /**
231 * lapic_get_maxlvt - get the maximum number of local vector table entries
232 */
233 int lapic_get_maxlvt(void)
234 {
235 unsigned int v;
236
237 v = apic_read(APIC_LVR);
238 /*
239 * - we always have APIC integrated on 64bit mode
240 * - 82489DXs do not report # of LVT entries
241 */
242 return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
243 }
244
245 /*
246 * This function sets up the local APIC timer, with a timeout of
247 * 'clocks' APIC bus clock. During calibration we actually call
248 * this function twice on the boot CPU, once with a bogus timeout
249 * value, second time for real. The other (noncalibrating) CPUs
250 * call this function only once, with the real, calibrated value.
251 *
252 * We do reads before writes even if unnecessary, to get around the
253 * P5 APIC double write bug.
254 */
255
/*
 * Program the LVT timer entry with @clocks initial count.
 * @oneshot: non-zero for one-shot mode, zero for periodic.
 * @irqen:   zero leaves the entry masked (used during calibration).
 */
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
	unsigned int lvtt_value, tmp_value;

	lvtt_value = LOCAL_TIMER_VECTOR;
	if (!oneshot)
		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
	if (!irqen)
		lvtt_value |= APIC_LVT_MASKED;

	apic_write(APIC_LVTT, lvtt_value);

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write(APIC_TDCR, (tmp_value
				& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
				| APIC_TDR_DIV_16);

	/* One-shot mode gets its count later via lapic_next_event() */
	if (!oneshot)
		apic_write(APIC_TMICT, clocks);
}
279
280 /*
281 * Setup extended LVT, AMD specific (K8, family 10h)
282 *
283 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
284 * MCE interrupts are supported. Thus MCE offset must be set to 0.
285 */
286
287 #define APIC_EILVT_LVTOFF_MCE 0
288 #define APIC_EILVT_LVTOFF_IBS 1
289
290 static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
291 {
292 unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
293 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
294
295 apic_write(reg, v);
296 }
297
/* Program the extended-LVT entry for MCE interrupts; returns the offset used. */
u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
{
	setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
	return APIC_EILVT_LVTOFF_MCE;
}
303
/* Program the extended-LVT entry for IBS interrupts; returns the offset used. */
u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
{
	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
	return APIC_EILVT_LVTOFF_IBS;
}
309
310 /*
311 * Program the next event, relative to now
312 */
static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt)
{
	/* Arm the one-shot timer: the count starts ticking immediately */
	apic_write(APIC_TMICT, delta);
	return 0;	/* programming the APIC timer cannot fail */
}
319
320 /*
321 * Setup the lapic timer in periodic or oneshot mode
322 */
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int v;

	/* Lapic used as dummy for broadcast ? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return;

	local_irq_save(flags);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		/* Reprogram LVTT with the calibrated count, irqs enabled */
		__setup_APIC_LVTT(calibration_result,
				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* Mask the LVT entry to stop timer interrupts */
		v = apic_read(APIC_LVTT);
		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, v);
		break;
	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here */
		break;
	}

	local_irq_restore(flags);
}
354
355 /*
356 * Local APIC timer broadcast function
357 */
static void lapic_timer_broadcast(cpumask_t mask)
{
#ifdef CONFIG_SMP
	/* Emulate a timer tick on the target CPUs via an IPI */
	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}
364
365 /*
 * Setup the local APIC timer for this CPU. Copy the initialized values
367 * of the boot CPU and register the clock event in the framework.
368 */
static void setup_APIC_timer(void)
{
	struct clock_event_device *levt = &__get_cpu_var(lapic_events);

	/* Clone the boot-CPU template, then bind it to this CPU only */
	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of_cpu(smp_processor_id());

	clockevents_register_device(levt);
}
378
379 /*
380 * In this function we calibrate APIC bus clocks to the external
381 * timer. Unfortunately we cannot use jiffies and the timer irq
382 * to calibrate, since some later bootup code depends on getting
383 * the first irq? Ugh.
384 *
385 * We want to do the calibration only once since we
 * want the local timer irqs to be in sync. CPUs connected
387 * by the same APIC bus have the very same bus frequency.
388 * And we want to have irqs off anyways, no accidental
389 * APIC irq that way.
390 */
391
392 #define TICK_COUNT 100000000
393
/*
 * Measure the APIC timer frequency (against the ACPI PM timer if
 * "apicpmtimer" was given, otherwise against the TSC), then fill in the
 * clockevent scaling factors and the per-jiffy calibration_result.
 * Returns 0 on success, -1 if the measured rate is implausibly low.
 */
static int __init calibrate_APIC_clock(void)
{
	unsigned apic, apic_start;
	unsigned long tsc, tsc_start;
	int result;			/* APIC timer ticks per second */

	local_irq_disable();

	/*
	 * Put whatever arbitrary (but long enough) timeout
	 * value into the APIC clock, we just want to get the
	 * counter running for calibration.
	 *
	 * No interrupt enable !
	 */
	__setup_APIC_LVTT(250000000, 0, 0);

	apic_start = apic_read(APIC_TMCCT);
#ifdef CONFIG_X86_PM_TIMER
	if (apic_calibrate_pmtmr && pmtmr_ioport) {
		pmtimer_wait(5000); /* 5ms wait */
		apic = apic_read(APIC_TMCCT);
		/* ticks consumed in 5ms, scaled to ticks/second */
		result = (apic_start - apic) * 1000L / 5;
	} else
#endif
	{
		/* Sample both counters until either advances TICK_COUNT */
		rdtscll(tsc_start);

		do {
			apic = apic_read(APIC_TMCCT);
			rdtscll(tsc);
		} while ((tsc - tsc_start) < TICK_COUNT &&
			 (apic_start - apic) < TICK_COUNT);

		/* APIC ticks/sec = apic_delta * tsc_rate / tsc_delta */
		result = (apic_start - apic) * 1000L * tsc_khz /
			 (tsc - tsc_start);
	}

	local_irq_enable();

	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);

	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
	       result / 1000 / 1000, result / 1000 % 1000);

	/* Calculate the scaled math multiplication factor */
	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
				       lapic_clockevent.shift);
	lapic_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
	lapic_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &lapic_clockevent);

	calibration_result = result / HZ;

	/*
	 * Do a sanity check on the APIC calibration result:
	 * less than 1 MHz is certainly bogus.
	 */
	if (calibration_result < (1000000 / HZ)) {
		printk(KERN_WARNING
		       "APIC frequency too slow, disabling apic timer\n");
		return -1;
	}

	return 0;
}
460
461 /*
462 * Setup the boot APIC
463 *
464 * Calibrate and verify the result.
465 */
void __init setup_boot_APIC_clock(void)
{
	/*
	 * The local apic timer can be disabled via the kernel commandline.
	 * Register the lapic timer as a dummy clock event source on SMP
	 * systems, so the broadcast mechanism is used. On UP systems simply
	 * ignore it.
	 */
	if (disable_apic_timer) {
		printk(KERN_INFO "Disabling APIC timer\n");
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1) {
			lapic_clockevent.mult = 1;
			setup_APIC_timer();
		}
		return;
	}

	printk(KERN_INFO "Using local APIC timer interrupts.\n");
	if (calibrate_APIC_clock()) {
		/* Calibration failed: register as dummy (broadcast) only.
		 * No broadcast on UP ! */
		if (num_possible_cpus() > 1)
			setup_APIC_timer();
		return;
	}

	/*
	 * If nmi_watchdog is set to IO_APIC, we need the
	 * PIT/HPET going. Otherwise register lapic as a dummy
	 * device.
	 */
	if (nmi_watchdog != NMI_IO_APIC)
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
	else
		printk(KERN_WARNING "APIC timer registered as dummy,"
			" due to nmi_watchdog=%d!\n", nmi_watchdog);

	setup_APIC_timer();
}
505
/* Secondary CPUs reuse the calibration done by the boot CPU. */
void __cpuinit setup_secondary_APIC_clock(void)
{
	setup_APIC_timer();
}
510
511 /*
512 * The guts of the apic timer interrupt
513 */
static void local_apic_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	/*
	 * Normally we should not be here till LAPIC has been initialized but
	 * in some cases like kdump, its possible that there is a pending LAPIC
	 * timer interrupt from previous kernel's context and is delivered in
	 * new kernel the moment interrupts are enabled.
	 *
	 * Interrupts are enabled early and LAPIC is setup much later, hence
	 * its possible that when we get here evt->event_handler is NULL.
	 * Check for event_handler being NULL and discard the interrupt as
	 * spurious.
	 */
	if (!evt->event_handler) {
		printk(KERN_WARNING
		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
		/* Switch it off */
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
		return;
	}

	/*
	 * the NMI deadlock-detector uses this.
	 */
	add_pda(apic_timer_irqs, 1);

	/* Dispatch to whatever the clockevents layer installed (e.g. tick) */
	evt->event_handler(evt);
}
545
546 /*
547 * Local APIC timer interrupt. This is the most natural way for doing
548 * local interrupts, but local timer interrupts can be emulated by
549 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
550 *
551 * [ if a single-CPU system runs an SMP kernel then we call the local
552 * interrupt as well. Thus we cannot inline the local irq ... ]
553 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	exit_idle();
	irq_enter();
	local_apic_timer_interrupt();
	irq_exit();
	/* Restore the interrupted context's register snapshot */
	set_irq_regs(old_regs);
}
574
int setup_profiling_timer(unsigned int multiplier)
{
	/* Changing the profiling rate is not supported on the local APIC */
	return -EINVAL;
}
579
580
581 /*
582 * Local APIC start and shutdown
583 */
584
585 /**
586 * clear_local_APIC - shutdown the local APIC
587 *
588 * This is called, when a CPU is disabled and before rebooting, so the state of
589 * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
590 * leftovers during boot.
591 */
void clear_local_APIC(void)
{
	int maxlvt;
	u32 v;

	/* APIC hasn't been mapped yet */
	if (!apic_phys)
		return;

	maxlvt = lapic_get_maxlvt();
	/*
	 * Masking an LVT entry can trigger a local APIC error
	 * if the vector is zero. Mask LVTERR first to prevent this.
	 */
	if (maxlvt >= 3) {
		v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
		apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	}
	/*
	 * Careful: we have to set masks only first to deassert
	 * any level-triggered sources.
	 */
	v = apic_read(APIC_LVTT);
	apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT1);
	apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
	if (maxlvt >= 4) {
		v = apic_read(APIC_LVTPC);
		apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
	}

	/*
	 * Clean APIC state for other OSs:
	 * now that everything is masked, zero the entries entirely.
	 */
	apic_write(APIC_LVTT, APIC_LVT_MASKED);
	apic_write(APIC_LVT0, APIC_LVT_MASKED);
	apic_write(APIC_LVT1, APIC_LVT_MASKED);
	if (maxlvt >= 3)
		apic_write(APIC_LVTERR, APIC_LVT_MASKED);
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, APIC_LVT_MASKED);
	/* Clear the error status (write, then read back to latch) */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
}
638
639 /**
640 * disable_local_APIC - clear and disable the local APIC
641 */
642 void disable_local_APIC(void)
643 {
644 unsigned int value;
645
646 clear_local_APIC();
647
648 /*
649 * Disable APIC (implies clearing of registers
650 * for 82489DX!).
651 */
652 value = apic_read(APIC_SPIV);
653 value &= ~APIC_SPIV_APIC_ENABLED;
654 apic_write(APIC_SPIV, value);
655 }
656
657 void lapic_shutdown(void)
658 {
659 unsigned long flags;
660
661 if (!cpu_has_apic)
662 return;
663
664 local_irq_save(flags);
665
666 disable_local_APIC();
667
668 local_irq_restore(flags);
669 }
670
671 /*
672 * This is to verify that we're looking at a real local APIC.
673 * Check these against your board if the CPUs aren't getting
674 * started for no apparent reason.
675 */
/*
 * Probe the registers to make sure this really is a local APIC.
 * Returns 1 if the APIC looks genuine, 0 otherwise.
 */
int __init verify_local_APIC(void)
{
	unsigned int reg0, reg1;

	/*
	 * The version register is read-only in a real APIC.
	 */
	reg0 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
	apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
	reg1 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);

	/*
	 * The two version reads above should print the same
	 * numbers. If the second one is different, then we
	 * poke at a non-APIC.
	 */
	if (reg1 != reg0)
		return 0;

	/*
	 * Check if the version looks reasonably.
	 */
	reg1 = GET_APIC_VERSION(reg0);
	if (reg1 == 0x00 || reg1 == 0xff)
		return 0;
	reg1 = lapic_get_maxlvt();
	if (reg1 < 0x02 || reg1 == 0xff)
		return 0;

	/*
	 * The ID register is read/write in a real APIC.
	 */
	reg0 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
	reg1 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
	apic_write(APIC_ID, reg0);	/* restore the original ID */
	if (reg1 != (reg0 ^ APIC_ID_MASK))
		return 0;

	/*
	 * The next two are just to see if we have sane values.
	 * They're only really relevant if we're in Virtual Wire
	 * compatibility mode, but most boxes these days are not.
	 */
	reg0 = apic_read(APIC_LVT0);
	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
	reg1 = apic_read(APIC_LVT1);
	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);

	return 1;
}
731
732 /**
733 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
734 */
void __init sync_Arb_IDs(void)
{
	/*
	 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1
	 * (and on AMD K8+, which modern_apic() also covers).
	 */
	if (modern_apic())
		return;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
	/* Broadcast a level-triggered INIT to resync arbitration IDs */
	apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
			| APIC_DM_INIT);
}
750
751 /*
752 * An initial setup of the virtual wire mode.
753 */
void __init init_bsp_APIC(void)
{
	unsigned int value;

	/*
	 * Don't do the setup now if we have a SMP BIOS as the
	 * through-I/O-APIC virtual wire mode might be active.
	 */
	if (smp_found_config || !cpu_has_apic)
		return;

	value = apic_read(APIC_LVR);

	/*
	 * Do not trust the local APIC being empty at bootup.
	 */
	clear_local_APIC();

	/*
	 * Enable APIC: set the spurious vector and the enable bit,
	 * with focus-processor checking disabled.
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= APIC_SPIV_FOCUS_DISABLED;
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up the virtual wire mode: LINT0 delivers ExtINT from the
	 * 8259A, LINT1 delivers NMI.
	 */
	apic_write(APIC_LVT0, APIC_DM_EXTINT);
	value = APIC_DM_NMI;
	apic_write(APIC_LVT1, value);
}
789
790 /**
791 * setup_local_APIC - setup the local APIC
792 */
void __cpuinit setup_local_APIC(void)
{
	unsigned int value;
	int i, j;

	preempt_disable();
	value = apic_read(APIC_LVR);

	/* The ISR-sweep below relies on the vector's low nibble being 0xf */
	BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);

	/*
	 * Double-check whether this APIC is really registered.
	 * This is meaningless in clustered apic mode, so we skip it.
	 */
	if (!apic_id_registered())
		BUG();

	/*
	 * Intel recommends to set DFR, LDR and TPR before enabling
	 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
	 * document number 292116). So here it goes...
	 */
	init_apic_ldr();

	/*
	 * Set Task Priority to 'accept all'. We never change this
	 * later on.
	 */
	value = apic_read(APIC_TASKPRI);
	value &= ~APIC_TPRI_MASK;
	apic_write(APIC_TASKPRI, value);

	/*
	 * After a crash, we no longer service the interrupts and a pending
	 * interrupt from previous kernel might still have ISR bit set.
	 *
	 * Most probably by now CPU has serviced that pending interrupt and
	 * it might not have done the ack_APIC_irq() because it thought,
	 * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
	 * does not clear the ISR bit and cpu thinks it has already serviced
	 * the interrupt. Hence a vector might get locked. It was noticed
	 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
	 */
	for (i = APIC_ISR_NR - 1; i >= 0; i--) {
		value = apic_read(APIC_ISR + i*0x10);
		for (j = 31; j >= 0; j--) {
			if (value & (1<<j))
				ack_APIC_irq();
		}
	}

	/*
	 * Now that we are all set up, enable the APIC
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	/*
	 * Enable APIC
	 */
	value |= APIC_SPIV_APIC_ENABLED;

	/* We always use processor focus */

	/*
	 * Set spurious IRQ vector
	 */
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up LVT0, LVT1:
	 *
	 * set up through-local-APIC on the BP's LINT0. This is not
	 * strictly necessary in pure symmetric-IO mode, but sometimes
	 * we delegate interrupts to the 8259A.
	 */
	/*
	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
	 */
	/* value is non-zero iff LINT0 was already masked */
	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
	if (!smp_processor_id() && !value) {
		value = APIC_DM_EXTINT;
		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
			    smp_processor_id());
	} else {
		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
			    smp_processor_id());
	}
	apic_write(APIC_LVT0, value);

	/*
	 * only the BP should see the LINT1 NMI signal, obviously.
	 */
	if (!smp_processor_id())
		value = APIC_DM_NMI;
	else
		value = APIC_DM_NMI | APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
	preempt_enable();
}
894
/* Point the error LVT entry at ERROR_APIC_VECTOR and reset the ESR. */
static void __cpuinit lapic_setup_esr(void)
{
	unsigned maxlvt = lapic_get_maxlvt();

	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR);
	/*
	 * spec says clear errors after enabling vector.
	 * NOTE(review): guard is '> 3' here while clear_local_APIC() uses
	 * '>= 3' for LVTERR itself -- presumably deliberate (ESR write
	 * restrictions on old parts); confirm before unifying.
	 */
	if (maxlvt > 3)
		apic_write(APIC_ESR, 0);
}
906
/* Finish per-CPU APIC bringup: ESR, NMI watchdog, and PM hooks. */
void __cpuinit end_local_APIC_setup(void)
{
	lapic_setup_esr();
	setup_apic_nmi_watchdog(NULL);
	apic_pm_activate();
}
913
914 void check_x2apic(void)
915 {
916 int msr, msr2;
917
918 rdmsr(MSR_IA32_APICBASE, msr, msr2);
919
920 if (msr & X2APIC_ENABLE) {
921 printk("x2apic enabled by BIOS, switching to x2apic ops\n");
922 x2apic_preenabled = x2apic = 1;
923 apic_ops = &x2apic_ops;
924 }
925 }
926
927 void enable_x2apic(void)
928 {
929 int msr, msr2;
930
931 rdmsr(MSR_IA32_APICBASE, msr, msr2);
932 if (!(msr & X2APIC_ENABLE)) {
933 printk("Enabling x2apic\n");
934 wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
935 }
936 }
937
/*
 * Try to enable interrupt remapping and, on top of it, x2apic mode.
 * Panics when the BIOS pre-enabled x2apic but IR cannot be brought up,
 * since the CPU cannot be returned to xapic mode in that case.
 */
void enable_IR_x2apic(void)
{
#ifdef CONFIG_INTR_REMAP
	int ret;
	unsigned long flags;

	if (!cpu_has_x2apic)
		return;

	if (!x2apic_preenabled && disable_x2apic) {
		printk(KERN_INFO
		       "Skipped enabling x2apic and Interrupt-remapping "
		       "because of nox2apic\n");
		return;
	}

	if (x2apic_preenabled && disable_x2apic)
		panic("Bios already enabled x2apic, can't enforce nox2apic");

	if (!x2apic_preenabled && skip_ioapic_setup) {
		printk(KERN_INFO
		       "Skipped enabling x2apic and Interrupt-remapping "
		       "because of skipping io-apic setup\n");
		return;
	}

	ret = dmar_table_init();
	if (ret) {
		printk(KERN_INFO
		       "dmar_table_init() failed with %d:\n", ret);

		if (x2apic_preenabled)
			panic("x2apic enabled by bios. But IR enabling failed");
		else
			printk(KERN_INFO
			       "Not enabling x2apic,Intr-remapping\n");
		return;
	}

	/* Quiesce all interrupt sources while the IO-APIC is reprogrammed */
	local_irq_save(flags);
	mask_8259A();
	save_mask_IO_APIC_setup();

	ret = enable_intr_remapping(1);

	if (ret && x2apic_preenabled) {
		local_irq_restore(flags);
		panic("x2apic enabled by bios. But IR enabling failed");
	}

	if (ret)
		goto end;

	if (!x2apic) {
		x2apic = 1;
		apic_ops = &x2apic_ops;
		enable_x2apic();
	}
end:
	if (ret)
		/*
		 * IR enabling failed: roll the IO-APIC back to its
		 * saved pre-remapping state.
		 */
		restore_IO_APIC_setup();
	else
		reinit_intr_remapped_IO_APIC(x2apic_preenabled);

	unmask_8259A();
	local_irq_restore(flags);

	if (!ret) {
		if (!x2apic_preenabled)
			printk(KERN_INFO
			       "Enabled x2apic and interrupt-remapping\n");
		else
			printk(KERN_INFO
			       "Enabled Interrupt-remapping\n");
	} else
		printk(KERN_ERR
		       "Failed to enable Interrupt-remapping and x2apic\n");
#else
	if (!cpu_has_x2apic)
		return;

	if (x2apic_preenabled)
		panic("x2apic enabled prior OS handover,"
		      " enable CONFIG_INTR_REMAP");

	printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
	       " and x2apic\n");
#endif

	return;
}
1032
1033 /*
1034 * Detect and enable local APICs on non-SMP boards.
1035 * Original code written by Keir Fraser.
1036 * On AMD64 we trust the BIOS - if it says no APIC it is likely
1037 * not correctly set up (usually the APIC timer won't work etc.)
1038 */
static int __init detect_init_APIC(void)
{
	if (!cpu_has_apic) {
		printk(KERN_INFO "No local APIC present\n");
		return -1;	/* caller falls back to a fake APIC page */
	}

	/* No MP table: assume the architectural default base and BSP id 0 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
	boot_cpu_physical_apicid = 0;
	return 0;
}
1050
void __init early_init_lapic_mapping(void)
{
	unsigned long phys_addr;

	/*
	 * If no local APIC can be found then go out
	 * : it means there is no mpatable and MADT
	 */
	if (!smp_found_config)
		return;

	phys_addr = mp_lapic_addr;

	/* Map the APIC registers uncached at the fixmap slot */
	set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
		    APIC_BASE, phys_addr);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	boot_cpu_physical_apicid = read_apic_id();
}
1074
1075 /**
1076 * init_apic_mappings - initialize APIC mappings
1077 */
void __init init_apic_mappings(void)
{
	/* x2apic uses MSR access; no MMIO mapping needed */
	if (x2apic) {
		boot_cpu_physical_apicid = read_apic_id();
		return;
	}

	/*
	 * If no local APIC can be found then set up a fake all
	 * zeroes page to simulate the local APIC and another
	 * one for the IO-APIC.
	 */
	if (!smp_found_config && detect_init_APIC()) {
		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
		apic_phys = __pa(apic_phys);
	} else
		apic_phys = mp_lapic_addr;

	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
		    APIC_BASE, apic_phys);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	boot_cpu_physical_apicid = read_apic_id();
}
1106
1107 /*
1108 * This initializes the IO-APIC and APIC hardware if this is
1109 * a UP kernel.
1110 */
int __init APIC_init_uniprocessor(void)
{
	if (disable_apic) {
		printk(KERN_INFO "Apic disabled\n");
		return -1;
	}
	if (!cpu_has_apic) {
		disable_apic = 1;
		printk(KERN_INFO "Apic disabled by BIOS\n");
		return -1;
	}

	enable_IR_x2apic();
	setup_apic_routing();

	verify_local_APIC();

	connect_bsp_APIC();

	/* Register the BSP and write its APIC id back (UP: it is CPU 0) */
	physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));

	setup_local_APIC();

	/*
	 * Now enable IO-APICs, actually call clear_IO_APIC
	 * We need clear_IO_APIC before enabling vector on BP
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();

	/* Without an IO-APIC the NMI watchdog must run off the LAPIC */
	if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
		localise_nmi_watchdog();
	end_local_APIC_setup();

	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;
	setup_boot_APIC_clock();
	check_nmi_watchdog();
	return 0;
}
1154
1155 /*
1156 * Local APIC interrupts
1157 */
1158
1159 /*
1160 * This interrupt should _never_ happen with our APIC/SMP architecture
1161 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned int v;
	exit_idle();
	irq_enter();
	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one. Just in case...
	 * Spurious interrupts should not be ACKed.
	 *
	 * Each 32-bit ISR register covers 32 vectors and registers are
	 * 0x10 apart, so the register offset for a vector is
	 * (vector / 32) * 0x10 == (vector & ~0x1f) >> 1.
	 */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	add_pda(irq_spurious_count, 1);
	irq_exit();
}
1179
1180 /*
1181 * This interrupt should never happen with our APIC/SMP architecture
1182 */
asmlinkage void smp_error_interrupt(void)
{
	unsigned int v, v1;

	exit_idle();
	irq_enter();
	/* First tickle the hardware, only then report what went on. -- REW */
	v = apic_read(APIC_ESR);
	apic_write(APIC_ESR, 0);	/* write latches fresh error bits */
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();
	atomic_inc(&irq_err_count);

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	*/
	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
	       smp_processor_id(), v , v1);
	irq_exit();
}
1210
1211 /**
 * connect_bsp_APIC - attach the APIC to the interrupt system
 */
void __init connect_bsp_APIC(void)
{
	/* Subarch hook; see mach_apic.h for the active implementation */
	enable_apic_mode();
}
1218
/*
 * Put the BSP's APIC back into Virtual Wire compatibility mode (e.g.
 * before reboot/kexec). @virt_wire_setup non-zero means LINT0 should be
 * left disabled instead of carrying ExtINT.
 */
void disconnect_bsp_APIC(int virt_wire_setup)
{
	/* Go back to Virtual Wire compatibility mode */
	unsigned long value;

	/* For the spurious interrupt use vector F, and enable it */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= 0xf;
	apic_write(APIC_SPIV, value);

	if (!virt_wire_setup) {
		/*
		 * For LVT0 make it edge triggered, active high,
		 * external and enabled
		 */
		value = apic_read(APIC_LVT0);
		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
		apic_write(APIC_LVT0, value);
	} else {
		/* Disable LVT0 */
		apic_write(APIC_LVT0, APIC_LVT_MASKED);
	}

	/* For LVT1 make it edge triggered, active high, nmi and enabled */
	value = apic_read(APIC_LVT1);
	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
			APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
			APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
	apic_write(APIC_LVT1, value);
}
1257
1258 void __cpuinit generic_processor_info(int apicid, int version)
1259 {
1260 int cpu;
1261 cpumask_t tmp_map;
1262
1263 if (num_processors >= NR_CPUS) {
1264 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
1265 " Processor ignored.\n", NR_CPUS);
1266 return;
1267 }
1268
1269 num_processors++;
1270 cpus_complement(tmp_map, cpu_present_map);
1271 cpu = first_cpu(tmp_map);
1272
1273 physid_set(apicid, phys_cpu_present_map);
1274 if (apicid == boot_cpu_physical_apicid) {
1275 /*
1276 * x86_bios_cpu_apicid is required to have processors listed
1277 * in same order as logical cpu numbers. Hence the first
1278 * entry is BSP, and so on.
1279 */
1280 cpu = 0;
1281 }
1282 if (apicid > max_physical_apicid)
1283 max_physical_apicid = apicid;
1284
1285 /* are we being called early in kernel startup? */
1286 if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
1287 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
1288 u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
1289
1290 cpu_to_apicid[cpu] = apicid;
1291 bios_cpu_apicid[cpu] = apicid;
1292 } else {
1293 per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1294 per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1295 }
1296
1297 cpu_set(cpu, cpu_possible_map);
1298 cpu_set(cpu, cpu_present_map);
1299 }
1300
/* Physical APIC id of the CPU we are currently running on. */
int hard_smp_processor_id(void)
{
	int id = read_apic_id();

	return id;
}
1305
1306 /*
1307 * Power management
1308 */
1309 #ifdef CONFIG_PM
1310
static struct {
	/* 'active' is true if the local APIC was enabled by us and
	   not the BIOS; this signifies that we are also responsible
	   for disabling it before entering apm/acpi suspend */
	int active;
	/* r/w apic fields: saved by lapic_suspend(), written back
	   in the required order by lapic_resume() */
	unsigned int apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
	unsigned int apic_spiv;
	unsigned int apic_lvtt;
	unsigned int apic_lvtpc;	/* only touched when maxlvt >= 4 */
	unsigned int apic_lvt0;
	unsigned int apic_lvt1;
	unsigned int apic_lvterr;
	unsigned int apic_tmict;
	unsigned int apic_tdcr;
	unsigned int apic_thmr;		/* only with CONFIG_X86_MCE_INTEL,
					   maxlvt >= 5 */
} apic_pm_state;
1331
/*
 * Save the r/w local APIC registers into apic_pm_state and disable the
 * APIC.  No-op (returns 0) unless apic_pm_state.active is set, i.e.
 * unless we -- and not the BIOS -- enabled the APIC.
 */
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	/* number of LVT entries gates which optional registers exist */
	maxlvt = lapic_get_maxlvt();

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	if (maxlvt >= 4)
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif
	/* disable with interrupts off so nothing uses the APIC mid-teardown */
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
	return 0;
}
1364
/*
 * Restore the local APIC state saved by lapic_suspend().
 * No-op (returns 0) unless apic_pm_state.active is set.
 */
static int lapic_resume(struct sys_device *dev)
{
	unsigned int l, h;
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	local_irq_save(flags);
	if (!x2apic) {
		/* re-enable the APIC at its MMIO base via the base MSR */
		rdmsr(MSR_IA32_APICBASE, l, h);
		l &= ~MSR_IA32_APICBASE_BASE;
		l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
		wrmsr(MSR_IA32_APICBASE, l, h);
	} else
		enable_x2apic();

	/* keep LVTERR masked while the other registers are rewritten */
	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 5)
		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	/* clear any stale error state, then restore the real LVTERR */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	local_irq_restore(flags);
	return 0;
}
1410
1411 static struct sysdev_class lapic_sysclass = {
1412 .name = "lapic",
1413 .resume = lapic_resume,
1414 .suspend = lapic_suspend,
1415 };
1416
1417 static struct sys_device device_lapic = {
1418 .id = 0,
1419 .cls = &lapic_sysclass,
1420 };
1421
/*
 * Record that we enabled the APIC ourselves, so lapic_suspend() and
 * lapic_resume() will actually save/restore it.
 */
static void __cpuinit apic_pm_activate(void)
{
	apic_pm_state.active = 1;
}
1426
1427 static int __init init_lapic_sysfs(void)
1428 {
1429 int error;
1430
1431 if (!cpu_has_apic)
1432 return 0;
1433 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
1434
1435 error = sysdev_class_register(&lapic_sysclass);
1436 if (!error)
1437 error = sysdev_register(&device_lapic);
1438 return error;
1439 }
1440 device_initcall(init_lapic_sysfs);
1441
1442 #else /* CONFIG_PM */
1443
/* CONFIG_PM disabled: no suspend state to track, so this is a no-op. */
static void apic_pm_activate(void) { }
1445
1446 #endif /* CONFIG_PM */
1447
1448 /*
1449 * apic_is_clustered_box() -- Check if we can expect good TSC
1450 *
1451 * Thus far, the major user of this is IBM's Summit2 series:
1452 *
1453 * Clustered boxes may have unsynced TSC problems if they are
1454 * multi-chassis. Use available data to take a good guess.
1455 * If in doubt, go HPET.
1456 */
1457 __cpuinit int apic_is_clustered_box(void)
1458 {
1459 int i, clusters, zeros;
1460 unsigned id;
1461 u16 *bios_cpu_apicid;
1462 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
1463
1464 /*
1465 * there is not this kind of box with AMD CPU yet.
1466 * Some AMD box with quadcore cpu and 8 sockets apicid
1467 * will be [4, 0x23] or [8, 0x27] could be thought to
1468 * vsmp box still need checking...
1469 */
1470 if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
1471 return 0;
1472
1473 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
1474 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
1475
1476 for (i = 0; i < NR_CPUS; i++) {
1477 /* are we being called early in kernel startup? */
1478 if (bios_cpu_apicid) {
1479 id = bios_cpu_apicid[i];
1480 }
1481 else if (i < nr_cpu_ids) {
1482 if (cpu_present(i))
1483 id = per_cpu(x86_bios_cpu_apicid, i);
1484 else
1485 continue;
1486 }
1487 else
1488 break;
1489
1490 if (id != BAD_APICID)
1491 __set_bit(APIC_CLUSTERID(id), clustermap);
1492 }
1493
1494 /* Problem: Partially populated chassis may not have CPUs in some of
1495 * the APIC clusters they have been allocated. Only present CPUs have
1496 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
1497 * Since clusters are allocated sequentially, count zeros only if
1498 * they are bounded by ones.
1499 */
1500 clusters = 0;
1501 zeros = 0;
1502 for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
1503 if (test_bit(i, clustermap)) {
1504 clusters += 1 + zeros;
1505 zeros = 0;
1506 } else
1507 ++zeros;
1508 }
1509
1510 /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
1511 * not guaranteed to be synced between boards
1512 */
1513 if (is_vsmp_box() && clusters > 1)
1514 return 1;
1515
1516 /*
1517 * If clusters > 2, then should be multi-chassis.
1518 * May have to revisit this when multi-core + hyperthreaded CPUs come
1519 * out, but AFAIK this will work even for them.
1520 */
1521 return (clusters > 2);
1522 }
1523
1524 static __init int setup_nox2apic(char *str)
1525 {
1526 disable_x2apic = 1;
1527 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_X2APIC);
1528 return 0;
1529 }
1530 early_param("nox2apic", setup_nox2apic);
1531
1532
1533 /*
1534 * APIC command line parameters
1535 */
1536 static int __init apic_set_verbosity(char *str)
1537 {
1538 if (str == NULL) {
1539 skip_ioapic_setup = 0;
1540 ioapic_force = 1;
1541 return 0;
1542 }
1543 if (strcmp("debug", str) == 0)
1544 apic_verbosity = APIC_DEBUG;
1545 else if (strcmp("verbose", str) == 0)
1546 apic_verbosity = APIC_VERBOSE;
1547 else {
1548 printk(KERN_WARNING "APIC Verbosity level %s not recognised"
1549 " use apic=verbose or apic=debug\n", str);
1550 return -EINVAL;
1551 }
1552
1553 return 0;
1554 }
1555 early_param("apic", apic_set_verbosity);
1556
1557 static __init int setup_disableapic(char *str)
1558 {
1559 disable_apic = 1;
1560 setup_clear_cpu_cap(X86_FEATURE_APIC);
1561 return 0;
1562 }
1563 early_param("disableapic", setup_disableapic);
1564
/* same as disableapic, for compatibility */
static __init int setup_nolapic(char *str)
{
	/* "nolapic" is just an alias: delegate to the disableapic handler */
	return setup_disableapic(str);
}
early_param("nolapic", setup_nolapic);
1571
/* "lapic_timer_c2_ok": trust the local APIC timer to keep running in C2. */
static int __init parse_lapic_timer_c2_ok(char *arg)
{
	local_apic_timer_c2_ok = 1;
	return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1578
1579 static __init int setup_noapictimer(char *str)
1580 {
1581 if (str[0] != ' ' && str[0] != 0)
1582 return 0;
1583 disable_apic_timer = 1;
1584 return 1;
1585 }
1586 __setup("noapictimer", setup_noapictimer);
1587
/*
 * "apicpmtimer": flag that APIC timer calibration should use the ACPI
 * PM timer (consumed elsewhere in this file) and disable the TSC.
 */
static __init int setup_apicpmtimer(char *s)
{
	apic_calibrate_pmtmr = 1;
	notsc_setup(NULL);
	return 0;
}
__setup("apicpmtimer", setup_apicpmtimer);
1595
1596 static int __init lapic_insert_resource(void)
1597 {
1598 if (!apic_phys)
1599 return -1;
1600
1601 /* Put local APIC into the resource map. */
1602 lapic_resource.start = apic_phys;
1603 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
1604 insert_resource(&iomem_resource, &lapic_resource);
1605
1606 return 0;
1607 }
1608
1609 /*
1610 * need call insert after e820_reserve_resources()
1611 * that is using request_resource
1612 */
1613 late_initcall(lapic_insert_resource);
This page took 0.071417 seconds and 6 git commands to generate.