x64, x2apic/intr-remap: basic apic ops support
deliverable/linux.git: arch/x86/kernel/apic_32.c
1 /*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17 #include <linux/init.h>
18
19 #include <linux/mm.h>
20 #include <linux/delay.h>
21 #include <linux/bootmem.h>
22 #include <linux/interrupt.h>
23 #include <linux/mc146818rtc.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/sysdev.h>
26 #include <linux/cpu.h>
27 #include <linux/clockchips.h>
28 #include <linux/acpi_pmtmr.h>
29 #include <linux/module.h>
30 #include <linux/dmi.h>
31
32 #include <asm/atomic.h>
33 #include <asm/smp.h>
34 #include <asm/mtrr.h>
35 #include <asm/mpspec.h>
36 #include <asm/desc.h>
37 #include <asm/arch_hooks.h>
38 #include <asm/hpet.h>
39 #include <asm/i8253.h>
40 #include <asm/nmi.h>
41
42 #include <mach_apic.h>
43 #include <mach_apicdef.h>
44 #include <mach_ipi.h>
45
46 /*
47 * Sanity check
48 */
49 #if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
50 # error SPURIOUS_APIC_VECTOR definition error
51 #endif
52
53 unsigned long mp_lapic_addr;
54
55 /*
56 * Knob to control our willingness to enable the local APIC.
57 *
58 * +1=force-enable
59 */
60 static int force_enable_local_apic;
61 int disable_apic;
62
63 /* Local APIC timer verification ok */
64 static int local_apic_timer_verify_ok;
65 /* Disable local APIC timer from the kernel commandline or via dmi quirk */
66 static int local_apic_timer_disabled;
67 /* Local APIC timer works in C2 */
68 int local_apic_timer_c2_ok;
69 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
70
71 int first_system_vector = 0xfe;
72
73 char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
74
75 /*
76 * Debug level, exported for io_apic.c
77 */
78 int apic_verbosity;
79
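/* Set when the platform is in PIC mode (IMCR present); see connect_bsp_APIC() */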
80 int pic_mode;
81
82 /* Have we found an MP table */
83 int smp_found_config;
84
85 static struct resource lapic_resource = {
86 .name = "Local APIC",
87 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
88 };
89
90 static unsigned int calibration_result;
91
92 static int lapic_next_event(unsigned long delta,
93 struct clock_event_device *evt);
94 static void lapic_timer_setup(enum clock_event_mode mode,
95 struct clock_event_device *evt);
96 static void lapic_timer_broadcast(cpumask_t mask);
97 static void apic_pm_activate(void);
98
99 /*
100 * The local apic timer can be used for any function which is CPU local.
101 */
102 static struct clock_event_device lapic_clockevent = {
103 .name = "lapic",
104 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
105 | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
106 .shift = 32,
107 .set_mode = lapic_timer_setup,
108 .set_next_event = lapic_next_event,
109 .broadcast = lapic_timer_broadcast,
110 .rating = 100,
111 .irq = -1,
112 };
113 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
114
115 /* Local APIC was disabled by the BIOS and enabled by the kernel */
116 static int enabled_via_apicbase;
117
118 static unsigned long apic_phys;
119
120 /*
121 * Get the LAPIC version
122 */
123 static inline int lapic_get_version(void)
124 {
125 return GET_APIC_VERSION(apic_read(APIC_LVR));
126 }
127
128 /*
129  * Check if the APIC is integrated or a separate chip
130 */
131 static inline int lapic_is_integrated(void)
132 {
133 return APIC_INTEGRATED(lapic_get_version());
134 }
135
136 /*
137  * Check whether this is a modern or a first-generation APIC
138 */
139 static int modern_apic(void)
140 {
141 /* AMD systems use old APIC versions, so check the CPU */
142 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
143 boot_cpu_data.x86 >= 0xf)
144 return 1;
145 return lapic_get_version() >= 0x14;
146 }
147
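/*
 * Write an interrupt command: program the destination into ICR2 first,
 * then write the low word to ICR, which actually sends the IPI.
 */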
148 void apic_icr_write(u32 low, u32 id)
149 {
150 apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(id));
151 apic_write_around(APIC_ICR, low);
152 }
153
154 void apic_wait_icr_idle(void)
155 {
156 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
157 cpu_relax();
158 }
159
160 u32 safe_apic_wait_icr_idle(void)
161 {
162 u32 send_status;
163 int timeout;
164
165 timeout = 0;
166 do {
167 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
168 if (!send_status)
169 break;
170 udelay(100);
171 } while (timeout++ < 1000);
172
173 return send_status;
174 }
175
176 /**
177 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
178 */
179 void __cpuinit enable_NMI_through_LVT0(void)
180 {
181 unsigned int v = APIC_DM_NMI;
182
183 /* Level triggered for 82489DX */
184 if (!lapic_is_integrated())
185 v |= APIC_LVT_LEVEL_TRIGGER;
186 apic_write_around(APIC_LVT0, v);
187 }
188
189 /**
190 * get_physical_broadcast - Get number of physical broadcast IDs
191 */
192 int get_physical_broadcast(void)
193 {
194 return modern_apic() ? 0xff : 0xf;
195 }
196
197 /**
198 * lapic_get_maxlvt - get the maximum number of local vector table entries
199 */
200 int lapic_get_maxlvt(void)
201 {
202 unsigned int v = apic_read(APIC_LVR);
203
204 /* 82489DXs do not report # of LVT entries. */
205 return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
206 }
207
208 /*
209 * Local APIC timer
210 */
211
212 /* Clock divisor is set to 16 */
213 #define APIC_DIVISOR 16
214
215 /*
216 * This function sets up the local APIC timer, with a timeout of
217  * 'clocks' APIC bus clocks. During calibration we actually call
218 * this function twice on the boot CPU, once with a bogus timeout
219  * value and a second time with the real one. The other (non-calibrating) CPUs
220 * call this function only once, with the real, calibrated value.
221 *
222 * We do reads before writes even if unnecessary, to get around the
223 * P5 APIC double write bug.
224 */
225 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
226 {
227 unsigned int lvtt_value, tmp_value;
228
229 lvtt_value = LOCAL_TIMER_VECTOR;
230 if (!oneshot)
231 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
232 if (!lapic_is_integrated())
233 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
234
235 if (!irqen)
236 lvtt_value |= APIC_LVT_MASKED;
237
238 apic_write_around(APIC_LVTT, lvtt_value);
239
240 /*
241 * Divide PICLK by 16
242 */
243 tmp_value = apic_read(APIC_TDCR);
244 apic_write_around(APIC_TDCR, (tmp_value
245 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
246 | APIC_TDR_DIV_16);
247
248 if (!oneshot)
249 apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
250 }
251
252 /*
253 * Program the next event, relative to now
254 */
255 static int lapic_next_event(unsigned long delta,
256 struct clock_event_device *evt)
257 {
258 apic_write_around(APIC_TMICT, delta);
259 return 0;
260 }
261
262 /*
263 * Setup the lapic timer in periodic or oneshot mode
264 */
265 static void lapic_timer_setup(enum clock_event_mode mode,
266 struct clock_event_device *evt)
267 {
268 unsigned long flags;
269 unsigned int v;
270
271 /* Lapic used for broadcast ? */
272 if (!local_apic_timer_verify_ok)
273 return;
274
275 local_irq_save(flags);
276
277 switch (mode) {
278 case CLOCK_EVT_MODE_PERIODIC:
279 case CLOCK_EVT_MODE_ONESHOT:
280 __setup_APIC_LVTT(calibration_result,
281 mode != CLOCK_EVT_MODE_PERIODIC, 1);
282 break;
283 case CLOCK_EVT_MODE_UNUSED:
284 case CLOCK_EVT_MODE_SHUTDOWN:
285 v = apic_read(APIC_LVTT);
286 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
287 apic_write_around(APIC_LVTT, v);
288 break;
289 case CLOCK_EVT_MODE_RESUME:
290 /* Nothing to do here */
291 break;
292 }
293
294 local_irq_restore(flags);
295 }
296
297 /*
298 * Local APIC timer broadcast function
299 */
300 static void lapic_timer_broadcast(cpumask_t mask)
301 {
302 #ifdef CONFIG_SMP
303 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
304 #endif
305 }
306
307 /*
308  * Set up the local APIC timer for this CPU. Copy the initialized values
309 * of the boot CPU and register the clock event in the framework.
310 */
311 static void __devinit setup_APIC_timer(void)
312 {
313 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
314
315 memcpy(levt, &lapic_clockevent, sizeof(*levt));
316 levt->cpumask = cpumask_of_cpu(smp_processor_id());
317
318 clockevents_register_device(levt);
319 }
320
321 /*
322  * In this function we calibrate the APIC bus clocks to the external timer.
323 *
324 * We want to do the calibration only once since we want to have local timer
325  * irqs synchronous. CPUs connected to the same APIC bus have the very same bus
326 * frequency.
327 *
328 * This was previously done by reading the PIT/HPET and waiting for a wrap
329  * around to find out that a tick has elapsed. I have a box where the PIT
330 * readout is broken, so it never gets out of the wait loop again. This was
331 * also reported by others.
332 *
333 * Monitoring the jiffies value is inaccurate and the clockevents
334 * infrastructure allows us to do a simple substitution of the interrupt
335 * handler.
336 *
337 * The calibration routine also uses the pm_timer when possible, as the PIT
338 * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
339 * back to normal later in the boot process).
340 */
341
342 #define LAPIC_CAL_LOOPS (HZ/10)
343
344 static __initdata int lapic_cal_loops = -1;
345 static __initdata long lapic_cal_t1, lapic_cal_t2;
346 static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
347 static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
348 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
349
350 /*
351 * Temporary interrupt handler.
352 */
353 static void __init lapic_cal_handler(struct clock_event_device *dev)
354 {
355 unsigned long long tsc = 0;
356 long tapic = apic_read(APIC_TMCCT);
357 unsigned long pm = acpi_pm_read_early();
358
359 if (cpu_has_tsc)
360 rdtscll(tsc);
361
362 switch (lapic_cal_loops++) {
363 case 0:
364 lapic_cal_t1 = tapic;
365 lapic_cal_tsc1 = tsc;
366 lapic_cal_pm1 = pm;
367 lapic_cal_j1 = jiffies;
368 break;
369
370 case LAPIC_CAL_LOOPS:
371 lapic_cal_t2 = tapic;
372 lapic_cal_tsc2 = tsc;
373 if (pm < lapic_cal_pm1)
374 pm += ACPI_PM_OVRRUN;
375 lapic_cal_pm2 = pm;
376 lapic_cal_j2 = jiffies;
377 break;
378 }
379 }
380
381 /*
382 * Setup the boot APIC
383 *
384 * Calibrate and verify the result.
385 */
386 void __init setup_boot_APIC_clock(void)
387 {
388 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
389 const long pm_100ms = PMTMR_TICKS_PER_SEC/10;
390 const long pm_thresh = pm_100ms/100;
391 void (*real_handler)(struct clock_event_device *dev);
392 unsigned long deltaj;
393 long delta, deltapm;
394 int pm_referenced = 0;
395
396 /*
397 * The local apic timer can be disabled via the kernel
398 * commandline or from the CPU detection code. Register the lapic
399 * timer as a dummy clock event source on SMP systems, so the
400 * broadcast mechanism is used. On UP systems simply ignore it.
401 */
402 if (local_apic_timer_disabled) {
403 /* No broadcast on UP ! */
404 if (num_possible_cpus() > 1) {
405 lapic_clockevent.mult = 1;
406 setup_APIC_timer();
407 }
408 return;
409 }
410
411 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
412 "calibrating APIC timer ...\n");
413
414 local_irq_disable();
415
416 /* Replace the global interrupt handler */
417 real_handler = global_clock_event->event_handler;
418 global_clock_event->event_handler = lapic_cal_handler;
419
420 /*
421 * Setup the APIC counter to 1e9. There is no way the lapic
422 * can underflow in the 100ms detection time frame
423 */
424 __setup_APIC_LVTT(1000000000, 0, 0);
425
426 /* Let the interrupts run */
427 local_irq_enable();
428
429 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
430 cpu_relax();
431
432 local_irq_disable();
433
434 /* Restore the real event handler */
435 global_clock_event->event_handler = real_handler;
436
437 /* Build delta t1-t2 as apic timer counts down */
438 delta = lapic_cal_t1 - lapic_cal_t2;
439 apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
440
441 /* Check, if the PM timer is available */
442 deltapm = lapic_cal_pm2 - lapic_cal_pm1;
443 apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
444
445 if (deltapm) {
446 unsigned long mult;
447 u64 res;
448
449 mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
450
451 if (deltapm > (pm_100ms - pm_thresh) &&
452 deltapm < (pm_100ms + pm_thresh)) {
453 apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
454 } else {
455 res = (((u64) deltapm) * mult) >> 22;
456 do_div(res, 1000000);
457 printk(KERN_WARNING "APIC calibration not consistent "
458 "with PM Timer: %ldms instead of 100ms\n",
459 (long)res);
460 /* Correct the lapic counter value */
461 res = (((u64) delta) * pm_100ms);
462 do_div(res, deltapm);
463 printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
464 "%lu (%ld)\n", (unsigned long) res, delta);
465 delta = (long) res;
466 }
467 pm_referenced = 1;
468 }
469
470 /* Calculate the scaled math multiplication factor */
471 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
472 lapic_clockevent.shift);
473 lapic_clockevent.max_delta_ns =
474 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
475 lapic_clockevent.min_delta_ns =
476 clockevent_delta2ns(0xF, &lapic_clockevent);
477
478 calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
479
480 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
481 apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult);
482 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
483 calibration_result);
484
485 if (cpu_has_tsc) {
486 delta = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
487 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
488 "%ld.%04ld MHz.\n",
489 (delta / LAPIC_CAL_LOOPS) / (1000000 / HZ),
490 (delta / LAPIC_CAL_LOOPS) % (1000000 / HZ));
491 }
492
493 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
494 "%u.%04u MHz.\n",
495 calibration_result / (1000000 / HZ),
496 calibration_result % (1000000 / HZ));
497
498 local_apic_timer_verify_ok = 1;
499
500 /*
501 * Do a sanity check on the APIC calibration result
502 */
503 if (calibration_result < (1000000 / HZ)) {
504 local_irq_enable();
505 printk(KERN_WARNING
506 "APIC frequency too slow, disabling apic timer\n");
507 /* No broadcast on UP ! */
508 if (num_possible_cpus() > 1)
509 setup_APIC_timer();
510 return;
511 }
512
513 /* We trust the pm timer based calibration */
514 if (!pm_referenced) {
515 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
516
517 /*
518 * Setup the apic timer manually
519 */
520 levt->event_handler = lapic_cal_handler;
521 lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
522 lapic_cal_loops = -1;
523
524 /* Let the interrupts run */
525 local_irq_enable();
526
527 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
528 cpu_relax();
529
530 local_irq_disable();
531
532 /* Stop the lapic timer */
533 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
534
535 local_irq_enable();
536
537 /* Jiffies delta */
538 deltaj = lapic_cal_j2 - lapic_cal_j1;
539 apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
540
541 /* Check, if the jiffies result is consistent */
542 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
543 apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
544 else
545 local_apic_timer_verify_ok = 0;
546 } else
547 local_irq_enable();
548
549 if (!local_apic_timer_verify_ok) {
550 printk(KERN_WARNING
551 "APIC timer disabled due to verification failure.\n");
552 /* No broadcast on UP ! */
553 if (num_possible_cpus() == 1)
554 return;
555 } else {
556 /*
557 * If nmi_watchdog is set to IO_APIC, we need the
558 * PIT/HPET going. Otherwise register lapic as a dummy
559 * device.
560 */
561 if (nmi_watchdog != NMI_IO_APIC)
562 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
563 else
564 printk(KERN_WARNING "APIC timer registered as dummy,"
565 " due to nmi_watchdog=%d!\n", nmi_watchdog);
566 }
567
568 /* Setup the lapic or request the broadcast */
569 setup_APIC_timer();
570 }
571
572 void __devinit setup_secondary_APIC_clock(void)
573 {
574 setup_APIC_timer();
575 }
576
577 /*
578 * The guts of the apic timer interrupt
579 */
580 static void local_apic_timer_interrupt(void)
581 {
582 int cpu = smp_processor_id();
583 struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
584
585 /*
586 * Normally we should not be here till LAPIC has been initialized but
587  * in some cases, like kdump, it's possible that there is a pending LAPIC
588  * timer interrupt from the previous kernel's context which is delivered in
589  * the new kernel the moment interrupts are enabled.
590  *
591  * Interrupts are enabled early and the LAPIC is set up much later, hence
592  * it's possible that evt->event_handler is NULL when we get here.
593 * Check for event_handler being NULL and discard the interrupt as
594 * spurious.
595 */
596 if (!evt->event_handler) {
597 printk(KERN_WARNING
598 "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
599 /* Switch it off */
600 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
601 return;
602 }
603
604 /*
605 * the NMI deadlock-detector uses this.
606 */
607 per_cpu(irq_stat, cpu).apic_timer_irqs++;
608
609 evt->event_handler(evt);
610 }
611
612 /*
613 * Local APIC timer interrupt. This is the most natural way for doing
614 * local interrupts, but local timer interrupts can be emulated by
615 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
616 *
617 * [ if a single-CPU system runs an SMP kernel then we call the local
618 * interrupt as well. Thus we cannot inline the local irq ... ]
619 */
620 void smp_apic_timer_interrupt(struct pt_regs *regs)
621 {
622 struct pt_regs *old_regs = set_irq_regs(regs);
623
624 /*
625 * NOTE! We'd better ACK the irq immediately,
626 * because timer handling can be slow.
627 */
628 ack_APIC_irq();
629 /*
630 * update_process_times() expects us to have done irq_enter().
631  * Besides, if we don't, timer interrupts ignore the global
632 * interrupt lock, which is the WrongThing (tm) to do.
633 */
634 irq_enter();
635 local_apic_timer_interrupt();
636 irq_exit();
637
638 set_irq_regs(old_regs);
639 }
640
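/* Changing the profiling interrupt multiplier is not supported here */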
641 int setup_profiling_timer(unsigned int multiplier)
642 {
643 return -EINVAL;
644 }
645
646 /*
647 * Setup extended LVT, AMD specific (K8, family 10h)
648 *
649 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
650 * MCE interrupts are supported. Thus MCE offset must be set to 0.
651 */
652
653 #define APIC_EILVT_LVTOFF_MCE 0
654 #define APIC_EILVT_LVTOFF_IBS 1
655
656 static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
657 {
658 unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
659 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
660 apic_write(reg, v);
661 }
662
663 u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
664 {
665 setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
666 return APIC_EILVT_LVTOFF_MCE;
667 }
668
669 u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
670 {
671 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
672 return APIC_EILVT_LVTOFF_IBS;
673 }
674
675 /*
676 * Local APIC start and shutdown
677 */
678
679 /**
680 * clear_local_APIC - shutdown the local APIC
681 *
682  * This is called when a CPU is disabled and before rebooting, so the state of
683  * the local APIC has no dangling leftovers. Also used to clean out any BIOS
684 * leftovers during boot.
685 */
686 void clear_local_APIC(void)
687 {
688 int maxlvt;
689 u32 v;
690
691 /* APIC hasn't been mapped yet */
692 if (!apic_phys)
693 return;
694
695 maxlvt = lapic_get_maxlvt();
696 /*
697 * Masking an LVT entry can trigger a local APIC error
698 * if the vector is zero. Mask LVTERR first to prevent this.
699 */
700 if (maxlvt >= 3) {
701 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
702 apic_write_around(APIC_LVTERR, v | APIC_LVT_MASKED);
703 }
704 /*
705 * Careful: we have to set masks only first to deassert
706 * any level-triggered sources.
707 */
708 v = apic_read(APIC_LVTT);
709 apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
710 v = apic_read(APIC_LVT0);
711 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
712 v = apic_read(APIC_LVT1);
713 apic_write_around(APIC_LVT1, v | APIC_LVT_MASKED);
714 if (maxlvt >= 4) {
715 v = apic_read(APIC_LVTPC);
716 apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
717 }
718
719 	/* let's not touch this if we didn't frob it */
720 #ifdef CONFIG_X86_MCE_P4THERMAL
721 if (maxlvt >= 5) {
722 v = apic_read(APIC_LVTTHMR);
723 apic_write_around(APIC_LVTTHMR, v | APIC_LVT_MASKED);
724 }
725 #endif
726 /*
727 * Clean APIC state for other OSs:
728 */
729 apic_write_around(APIC_LVTT, APIC_LVT_MASKED);
730 apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
731 apic_write_around(APIC_LVT1, APIC_LVT_MASKED);
732 if (maxlvt >= 3)
733 apic_write_around(APIC_LVTERR, APIC_LVT_MASKED);
734 if (maxlvt >= 4)
735 apic_write_around(APIC_LVTPC, APIC_LVT_MASKED);
736
737 #ifdef CONFIG_X86_MCE_P4THERMAL
738 if (maxlvt >= 5)
739 apic_write_around(APIC_LVTTHMR, APIC_LVT_MASKED);
740 #endif
741 /* Integrated APIC (!82489DX) ? */
742 if (lapic_is_integrated()) {
743 if (maxlvt > 3)
744 /* Clear ESR due to Pentium errata 3AP and 11AP */
745 apic_write(APIC_ESR, 0);
746 apic_read(APIC_ESR);
747 }
748 }
749
750 /**
751 * disable_local_APIC - clear and disable the local APIC
752 */
753 void disable_local_APIC(void)
754 {
755 unsigned long value;
756
757 clear_local_APIC();
758
759 /*
760 * Disable APIC (implies clearing of registers
761 * for 82489DX!).
762 */
763 value = apic_read(APIC_SPIV);
764 value &= ~APIC_SPIV_APIC_ENABLED;
765 apic_write_around(APIC_SPIV, value);
766
767 /*
768 * When LAPIC was disabled by the BIOS and enabled by the kernel,
769 * restore the disabled state.
770 */
771 if (enabled_via_apicbase) {
772 unsigned int l, h;
773
774 rdmsr(MSR_IA32_APICBASE, l, h);
775 l &= ~MSR_IA32_APICBASE_ENABLE;
776 wrmsr(MSR_IA32_APICBASE, l, h);
777 }
778 }
779
780 /*
781  * If Linux enabled the LAPIC against the BIOS default, disable it again before
782  * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
783  * fail to power off. Additionally, clear all LVT entries before disable_local_APIC()
784 * for the case where Linux didn't enable the LAPIC.
785 */
786 void lapic_shutdown(void)
787 {
788 unsigned long flags;
789
790 if (!cpu_has_apic)
791 return;
792
793 local_irq_save(flags);
794 clear_local_APIC();
795
796 if (enabled_via_apicbase)
797 disable_local_APIC();
798
799 local_irq_restore(flags);
800 }
801
802 /*
803 * This is to verify that we're looking at a real local APIC.
804 * Check these against your board if the CPUs aren't getting
805 * started for no apparent reason.
806 */
807 int __init verify_local_APIC(void)
808 {
809 unsigned int reg0, reg1;
810
811 /*
812 * The version register is read-only in a real APIC.
813 */
814 reg0 = apic_read(APIC_LVR);
815 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
816 apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
817 reg1 = apic_read(APIC_LVR);
818 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
819
820 /*
821 * The two version reads above should print the same
822 * numbers. If the second one is different, then we
823 * poke at a non-APIC.
824 */
825 if (reg1 != reg0)
826 return 0;
827
828 /*
829  * Check if the version looks reasonable.
830 */
831 reg1 = GET_APIC_VERSION(reg0);
832 if (reg1 == 0x00 || reg1 == 0xff)
833 return 0;
834 reg1 = lapic_get_maxlvt();
835 if (reg1 < 0x02 || reg1 == 0xff)
836 return 0;
837
838 /*
839 * The ID register is read/write in a real APIC.
840 */
841 reg0 = apic_read(APIC_ID);
842 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
843
844 /*
845 * The next two are just to see if we have sane values.
846 * They're only really relevant if we're in Virtual Wire
847  * compatibility mode, but most boxes are these days.
848 */
849 reg0 = apic_read(APIC_LVT0);
850 apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
851 reg1 = apic_read(APIC_LVT1);
852 apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
853
854 return 1;
855 }
856
857 /**
858 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
859 */
860 void __init sync_Arb_IDs(void)
861 {
862 /*
863 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not
864 * needed on AMD.
865 */
866 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
867 return;
868 /*
869 * Wait for idle.
870 */
871 apic_wait_icr_idle();
872
873 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
874 apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
875 | APIC_DM_INIT);
876 }
877
878 /*
879 * An initial setup of the virtual wire mode.
880 */
881 void __init init_bsp_APIC(void)
882 {
883 unsigned long value;
884
885 /*
886  * Don't do the setup now if we have an SMP BIOS, as the
887 * through-I/O-APIC virtual wire mode might be active.
888 */
889 if (smp_found_config || !cpu_has_apic)
890 return;
891
892 /*
893 * Do not trust the local APIC being empty at bootup.
894 */
895 clear_local_APIC();
896
897 /*
898 * Enable APIC.
899 */
900 value = apic_read(APIC_SPIV);
901 value &= ~APIC_VECTOR_MASK;
902 value |= APIC_SPIV_APIC_ENABLED;
903
904 /* This bit is reserved on P4/Xeon and should be cleared */
905 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
906 (boot_cpu_data.x86 == 15))
907 value &= ~APIC_SPIV_FOCUS_DISABLED;
908 else
909 value |= APIC_SPIV_FOCUS_DISABLED;
910 value |= SPURIOUS_APIC_VECTOR;
911 apic_write_around(APIC_SPIV, value);
912
913 /*
914 * Set up the virtual wire mode.
915 */
916 apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
917 value = APIC_DM_NMI;
918 if (!lapic_is_integrated()) /* 82489DX */
919 value |= APIC_LVT_LEVEL_TRIGGER;
920 apic_write_around(APIC_LVT1, value);
921 }
922
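/*
 * Program error reporting: unmask LVTERR with ERROR_APIC_VECTOR, unless
 * this is an 82489DX or the ESR has been disabled via esr_disable.
 */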
923 static void __cpuinit lapic_setup_esr(void)
924 {
925 unsigned long oldvalue, value, maxlvt;
926 if (lapic_is_integrated() && !esr_disable) {
927 /* !82489DX */
928 maxlvt = lapic_get_maxlvt();
929 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
930 apic_write(APIC_ESR, 0);
931 oldvalue = apic_read(APIC_ESR);
932
933 /* enables sending errors */
934 value = ERROR_APIC_VECTOR;
935 apic_write_around(APIC_LVTERR, value);
936 /*
937 * spec says clear errors after enabling vector.
938 */
939 if (maxlvt > 3)
940 apic_write(APIC_ESR, 0);
941 value = apic_read(APIC_ESR);
942 if (value != oldvalue)
943 apic_printk(APIC_VERBOSE, "ESR value before enabling "
944 "vector: 0x%08lx after: 0x%08lx\n",
945 oldvalue, value);
946 } else {
947 if (esr_disable)
948 /*
949 * Something untraceable is creating bad interrupts on
950 * secondary quads ... for the moment, just leave the
951 * ESR disabled - we can't do anything useful with the
952 * errors anyway - mbligh
953 */
954 printk(KERN_INFO "Leaving ESR disabled.\n");
955 else
956 printk(KERN_INFO "No ESR for 82489DX.\n");
957 }
958 }
959
960
961 /**
962 * setup_local_APIC - setup the local APIC
963 */
964 void __cpuinit setup_local_APIC(void)
965 {
966 unsigned long value, integrated;
967 int i, j;
968
969 /* Pound the ESR really hard over the head with a big hammer - mbligh */
970 if (esr_disable) {
971 apic_write(APIC_ESR, 0);
972 apic_write(APIC_ESR, 0);
973 apic_write(APIC_ESR, 0);
974 apic_write(APIC_ESR, 0);
975 }
976
977 integrated = lapic_is_integrated();
978
979 /*
980 * Double-check whether this APIC is really registered.
981 */
982 if (!apic_id_registered())
983 WARN_ON_ONCE(1);
984
985 /*
986 * Intel recommends to set DFR, LDR and TPR before enabling
987 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
988 * document number 292116). So here it goes...
989 */
990 init_apic_ldr();
991
992 /*
993 * Set Task Priority to 'accept all'. We never change this
994 * later on.
995 */
996 value = apic_read(APIC_TASKPRI);
997 value &= ~APIC_TPRI_MASK;
998 apic_write_around(APIC_TASKPRI, value);
999
1000 /*
1001 * After a crash, we no longer service the interrupts and a pending
1002 	 * interrupt from the previous kernel might still have its ISR bit set.
1003 	 *
1004 	 * Most probably the CPU has by now serviced that pending interrupt and
1005 	 * it might not have done the ack_APIC_irq() because it thought the
1006 	 * interrupt came from the i8259 as an ExtInt. The LAPIC did not get an EOI,
1007 	 * so it does not clear the ISR bit and the CPU thinks it has already
1008 	 * serviced the interrupt. Hence a vector might get locked. It was noticed
1009 	 * for the timer irq (vector 0x31). Issue an extra EOI to clear the ISR.
1010 */
1011 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
1012 value = apic_read(APIC_ISR + i*0x10);
1013 for (j = 31; j >= 0; j--) {
1014 if (value & (1<<j))
1015 ack_APIC_irq();
1016 }
1017 }
1018
1019 /*
1020 * Now that we are all set up, enable the APIC
1021 */
1022 value = apic_read(APIC_SPIV);
1023 value &= ~APIC_VECTOR_MASK;
1024 /*
1025 * Enable APIC
1026 */
1027 value |= APIC_SPIV_APIC_ENABLED;
1028
1029 /*
1030 	 * Some unknown Intel IO/APIC (or APIC) erratum is biting us with
1031 * certain networking cards. If high frequency interrupts are
1032 * happening on a particular IOAPIC pin, plus the IOAPIC routing
1033 * entry is masked/unmasked at a high rate as well then sooner or
1034 	 * later the IOAPIC line gets 'stuck' and no more interrupts are received
1035 * from the device. If focus CPU is disabled then the hang goes
1036 * away, oh well :-(
1037 *
1038 	 * [ This bug can be reproduced easily with level-triggered
1039 * PCI Ne2000 networking cards and PII/PIII processors, dual
1040 * BX chipset. ]
1041 */
1042 /*
1043 * Actually disabling the focus CPU check just makes the hang less
1044 	 * frequent as it makes the interrupt distribution model more
1045 * like LRU than MRU (the short-term load is more even across CPUs).
1046 * See also the comment in end_level_ioapic_irq(). --macro
1047 */
1048
1049 /* Enable focus processor (bit==0) */
1050 value &= ~APIC_SPIV_FOCUS_DISABLED;
1051
1052 /*
1053 * Set spurious IRQ vector
1054 */
1055 value |= SPURIOUS_APIC_VECTOR;
1056 apic_write_around(APIC_SPIV, value);
1057
1058 /*
1059 * Set up LVT0, LVT1:
1060 *
1061 * set up through-local-APIC on the BP's LINT0. This is not
1062 * strictly necessary in pure symmetric-IO mode, but sometimes
1063 * we delegate interrupts to the 8259A.
1064 */
1065 /*
1066 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
1067 */
1068 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1069 if (!smp_processor_id() && (pic_mode || !value)) {
1070 value = APIC_DM_EXTINT;
1071 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
1072 smp_processor_id());
1073 } else {
1074 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
1075 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
1076 smp_processor_id());
1077 }
1078 apic_write_around(APIC_LVT0, value);
1079
1080 /*
1081 * only the BP should see the LINT1 NMI signal, obviously.
1082 */
1083 if (!smp_processor_id())
1084 value = APIC_DM_NMI;
1085 else
1086 value = APIC_DM_NMI | APIC_LVT_MASKED;
1087 if (!integrated) /* 82489DX */
1088 value |= APIC_LVT_LEVEL_TRIGGER;
1089 apic_write_around(APIC_LVT1, value);
1090 }
1091
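/*
 * Finish local APIC setup: set up error reporting, mask the local timer
 * until the clockevent code takes over, and activate the NMI watchdog
 * and APIC power management.
 */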
1092 void __cpuinit end_local_APIC_setup(void)
1093 {
1094 unsigned long value;
1095
1096 lapic_setup_esr();
1097 /* Disable the local apic timer */
1098 value = apic_read(APIC_LVTT);
1099 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1100 apic_write_around(APIC_LVTT, value);
1101
1102 setup_apic_nmi_watchdog(NULL);
1103 apic_pm_activate();
1104 }
1105
1106 /*
1107 * Detect and initialize APIC
1108 */
1109 static int __init detect_init_APIC(void)
1110 {
1111 u32 h, l, features;
1112
1113 /* Disabled by kernel option? */
1114 if (disable_apic)
1115 return -1;
1116
1117 switch (boot_cpu_data.x86_vendor) {
1118 case X86_VENDOR_AMD:
1119 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
1120 (boot_cpu_data.x86 == 15))
1121 break;
1122 goto no_apic;
1123 case X86_VENDOR_INTEL:
1124 if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
1125 (boot_cpu_data.x86 == 5 && cpu_has_apic))
1126 break;
1127 goto no_apic;
1128 default:
1129 goto no_apic;
1130 }
1131
1132 if (!cpu_has_apic) {
1133 /*
1134 * Over-ride BIOS and try to enable the local APIC only if
1135 * "lapic" specified.
1136 */
1137 if (!force_enable_local_apic) {
1138 printk(KERN_INFO "Local APIC disabled by BIOS -- "
1139 "you can enable it with \"lapic\"\n");
1140 return -1;
1141 }
1142 /*
1143 * Some BIOSes disable the local APIC in the APIC_BASE
1144 * MSR. This can only be done in software for Intel P6 or later
1145 * and AMD K7 (Model > 1) or later.
1146 */
1147 rdmsr(MSR_IA32_APICBASE, l, h);
1148 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
1149 printk(KERN_INFO
1150 "Local APIC disabled by BIOS -- reenabling.\n");
1151 l &= ~MSR_IA32_APICBASE_BASE;
1152 l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
1153 wrmsr(MSR_IA32_APICBASE, l, h);
1154 enabled_via_apicbase = 1;
1155 }
1156 }
1157 /*
1158 * The APIC feature bit should now be enabled
1159 * in `cpuid'
1160 */
1161 features = cpuid_edx(1);
1162 if (!(features & (1 << X86_FEATURE_APIC))) {
1163 printk(KERN_WARNING "Could not enable APIC!\n");
1164 return -1;
1165 }
1166 set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1167 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1168
1169 /* The BIOS may have set up the APIC at some other address */
1170 rdmsr(MSR_IA32_APICBASE, l, h);
1171 if (l & MSR_IA32_APICBASE_ENABLE)
1172 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1173
1174 printk(KERN_INFO "Found and enabled local APIC!\n");
1175
1176 apic_pm_activate();
1177
1178 return 0;
1179
1180 no_apic:
1181 printk(KERN_INFO "No local APIC present or hardware disabled\n");
1182 return -1;
1183 }
1184
1185 /**
1186 * init_apic_mappings - initialize APIC mappings
1187 */
1188 void __init init_apic_mappings(void)
1189 {
1190 /*
1191 * If no local APIC can be found then set up a fake all
1192 * zeroes page to simulate the local APIC and another
1193 * one for the IO-APIC.
1194 */
1195 if (!smp_found_config && detect_init_APIC()) {
1196 apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
1197 apic_phys = __pa(apic_phys);
1198 } else
1199 apic_phys = mp_lapic_addr;
1200
1201 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
1202 printk(KERN_DEBUG "mapped APIC to %08lx (%08lx)\n", APIC_BASE,
1203 apic_phys);
1204
1205 /*
1206 * Fetch the APIC ID of the BSP in case we have a
1207 * default configuration (or the MP table is broken).
1208 */
1209 if (boot_cpu_physical_apicid == -1U)
1210 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
1211
1212 }
1213
1214 /*
1215 * This initializes the IO-APIC and APIC hardware if this is
1216 * a UP kernel.
1217 */
1218
1219 int apic_version[MAX_APICS];
1220
1221 int __init APIC_init_uniprocessor(void)
1222 {
1223 if (disable_apic)
1224 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1225
1226 if (!smp_found_config && !cpu_has_apic)
1227 return -1;
1228
1229 /*
1230 * Complain if the BIOS pretends there is one.
1231 */
1232 if (!cpu_has_apic &&
1233 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1234 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1235 boot_cpu_physical_apicid);
1236 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1237 return -1;
1238 }
1239
1240 verify_local_APIC();
1241
1242 connect_bsp_APIC();
1243
1244 /*
1245 	 * Hack: In case of kdump, after a crash, the kernel might be booting
1246 	 * on a cpu with a non-zero lapic id. But boot_cpu_physical_apicid
1247 * might be zero if read from MP tables. Get it from LAPIC.
1248 */
1249 #ifdef CONFIG_CRASH_DUMP
1250 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
1251 #endif
1252 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1253
1254 setup_local_APIC();
1255
1256 #ifdef CONFIG_X86_IO_APIC
1257 if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
1258 #endif
1259 localise_nmi_watchdog();
1260 end_local_APIC_setup();
1261 #ifdef CONFIG_X86_IO_APIC
1262 if (smp_found_config)
1263 if (!skip_ioapic_setup && nr_ioapics)
1264 setup_IO_APIC();
1265 #endif
1266 setup_boot_clock();
1267
1268 return 0;
1269 }
1270
1271 /*
1272 * Local APIC interrupts
1273 */
1274
1275 /*
1276 * This interrupt should _never_ happen with our APIC/SMP architecture
1277 */
1278 void smp_spurious_interrupt(struct pt_regs *regs)
1279 {
1280 unsigned long v;
1281
1282 irq_enter();
1283 /*
1284 * Check if this really is a spurious interrupt and ACK it
1285 * if it is a vectored one. Just in case...
1286 * Spurious interrupts should not be ACKed.
1287 */
1288 v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
1289 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1290 ack_APIC_irq();
1291
1292 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
1293 printk(KERN_INFO "spurious APIC interrupt on CPU#%d, "
1294 "should never happen.\n", smp_processor_id());
1295 __get_cpu_var(irq_stat).irq_spurious_count++;
1296 irq_exit();
1297 }
1298
1299 /*
1300 * This interrupt should never happen with our APIC/SMP architecture
1301 */
1302 void smp_error_interrupt(struct pt_regs *regs)
1303 {
1304 unsigned long v, v1;
1305
1306 irq_enter();
1307 /* First tickle the hardware, only then report what went on. -- REW */
1308 v = apic_read(APIC_ESR);
1309 apic_write(APIC_ESR, 0);
1310 v1 = apic_read(APIC_ESR);
1311 ack_APIC_irq();
1312 atomic_inc(&irq_err_count);
1313
1314 /* Here is what the APIC error bits mean:
1315 0: Send CS error
1316 1: Receive CS error
1317 2: Send accept error
1318 3: Receive accept error
1319 4: Reserved
1320 5: Send illegal vector
1321 6: Received illegal vector
1322 7: Illegal register address
1323 */
1324 printk(KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
1325 smp_processor_id(), v , v1);
1326 irq_exit();
1327 }
1328
1329 #ifdef CONFIG_SMP
1330 void __init smp_intr_init(void)
1331 {
1332 /*
1333 * IRQ0 must be given a fixed assignment and initialized,
1334 * because it's used before the IO-APIC is set up.
1335 */
1336 set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
1337
1338 /*
1339 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
1340 * IPI, driven by wakeup.
1341 */
1342 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
1343
1344 /* IPI for invalidation */
1345 alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
1346
1347 /* IPI for generic function call */
1348 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
1349 }
1350 #endif
1351
1352 /*
1353 * Initialize APIC interrupts
1354 */
1355 void __init apic_intr_init(void)
1356 {
1357 #ifdef CONFIG_SMP
1358 smp_intr_init();
1359 #endif
1360 /* self generated IPI for local APIC timer */
1361 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
1362
1363 /* IPI vectors for APIC spurious and error interrupts */
1364 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
1365 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
1366
1367 /* thermal monitor LVT interrupt */
1368 #ifdef CONFIG_X86_MCE_P4THERMAL
1369 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
1370 #endif
1371 }
1372
1373 /**
1374 * connect_bsp_APIC - attach the APIC to the interrupt system
1375 */
1376 void __init connect_bsp_APIC(void)
1377 {
1378 if (pic_mode) {
1379 /*
1380 * Do not trust the local APIC being empty at bootup.
1381 */
1382 clear_local_APIC();
1383 /*
1384 * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
1385 * local APIC to INT and NMI lines.
1386 */
1387 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
1388 "enabling APIC mode.\n");
1389 outb(0x70, 0x22);
1390 outb(0x01, 0x23);
1391 }
1392 enable_apic_mode();
1393 }
1394
1395 /**
1396 * disconnect_bsp_APIC - detach the APIC from the interrupt system
1397  * @virt_wire_setup: indicates whether virtual wire mode is selected
1398 *
1399 * Virtual wire mode is necessary to deliver legacy interrupts even when the
1400 * APIC is disabled.
1401 */
1402 void disconnect_bsp_APIC(int virt_wire_setup)
1403 {
1404 if (pic_mode) {
1405 /*
1406 * Put the board back into PIC mode (has an effect only on
1407 * certain older boards). Note that APIC interrupts, including
1408 * IPIs, won't work beyond this point! The only exception are
1409 * INIT IPIs.
1410 */
1411 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
1412 "entering PIC mode.\n");
1413 outb(0x70, 0x22);
1414 outb(0x00, 0x23);
1415 } else {
1416 /* Go back to Virtual Wire compatibility mode */
1417 unsigned long value;
1418
1419 /* For the spurious interrupt use vector F, and enable it */
1420 value = apic_read(APIC_SPIV);
1421 value &= ~APIC_VECTOR_MASK;
1422 value |= APIC_SPIV_APIC_ENABLED;
1423 value |= 0xf;
1424 apic_write_around(APIC_SPIV, value);
1425
1426 if (!virt_wire_setup) {
1427 /*
1428 * For LVT0 make it edge triggered, active high,
1429 * external and enabled
1430 */
1431 value = apic_read(APIC_LVT0);
1432 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1433 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1434 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1435 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1436 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
1437 apic_write_around(APIC_LVT0, value);
1438 } else {
1439 /* Disable LVT0 */
1440 apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
1441 }
1442
1443 /*
1444 * For LVT1 make it edge triggered, active high, nmi and
1445 * enabled
1446 */
1447 value = apic_read(APIC_LVT1);
1448 value &= ~(
1449 APIC_MODE_MASK | APIC_SEND_PENDING |
1450 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1451 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1452 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1453 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
1454 apic_write_around(APIC_LVT1, value);
1455 }
1456 }
1457
1458 unsigned int __cpuinitdata maxcpus = NR_CPUS;
1459
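/*
 * Record a processor reported by the platform: validate its APIC version,
 * mark it in phys_cpu_present_map, pick a logical CPU number and store
 * the cpu -> apicid mappings.
 */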
1460 void __cpuinit generic_processor_info(int apicid, int version)
1461 {
1462 int cpu;
1463 cpumask_t tmp_map;
1464 physid_mask_t phys_cpu;
1465
1466 /*
1467 * Validate version
1468 */
1469 if (version == 0x0) {
1470 printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
1471 "fixing up to 0x10. (tell your hw vendor)\n",
1472 			apicid);
1473 version = 0x10;
1474 }
1475 apic_version[apicid] = version;
1476
1477 phys_cpu = apicid_to_cpu_present(apicid);
1478 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
1479
1480 if (num_processors >= NR_CPUS) {
1481 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
1482 " Processor ignored.\n", NR_CPUS);
1483 return;
1484 }
1485
1486 if (num_processors >= maxcpus) {
1487 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
1488 " Processor ignored.\n", maxcpus);
1489 return;
1490 }
1491
1492 num_processors++;
1493 cpus_complement(tmp_map, cpu_present_map);
1494 cpu = first_cpu(tmp_map);
1495
1496 if (apicid == boot_cpu_physical_apicid)
1497 /*
1498 * x86_bios_cpu_apicid is required to have processors listed
1499 		 * in the same order as logical cpu numbers. Hence the first
1500 		 * entry is the BSP, and so on.
1501 */
1502 cpu = 0;
1503
1504 if (apicid > max_physical_apicid)
1505 max_physical_apicid = apicid;
1506
1507 /*
1508 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
1509 	 * but we need to work out other dependencies like SMP_SUSPEND etc.
1510 * before this can be done without some confusion.
1511 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
1512 * - Ashok Raj <ashok.raj@intel.com>
1513 */
1514 if (max_physical_apicid >= 8) {
1515 switch (boot_cpu_data.x86_vendor) {
1516 case X86_VENDOR_INTEL:
1517 if (!APIC_XAPIC(version)) {
1518 def_to_bigsmp = 0;
1519 break;
1520 }
1521 /* If P4 and above fall through */
1522 case X86_VENDOR_AMD:
1523 def_to_bigsmp = 1;
1524 }
1525 }
1526 #ifdef CONFIG_SMP
1527 /* are we being called early in kernel startup? */
1528 if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
1529 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
1530 u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
1531
1532 cpu_to_apicid[cpu] = apicid;
1533 bios_cpu_apicid[cpu] = apicid;
1534 } else {
1535 per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1536 per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1537 }
1538 #endif
1539 cpu_set(cpu, cpu_possible_map);
1540 cpu_set(cpu, cpu_present_map);
1541 }
1542
1543 /*
1544 * Power management
1545 */
1546 #ifdef CONFIG_PM
1547
1548 static struct {
1549 int active;
1550 /* r/w apic fields */
1551 unsigned int apic_id;
1552 unsigned int apic_taskpri;
1553 unsigned int apic_ldr;
1554 unsigned int apic_dfr;
1555 unsigned int apic_spiv;
1556 unsigned int apic_lvtt;
1557 unsigned int apic_lvtpc;
1558 unsigned int apic_lvt0;
1559 unsigned int apic_lvt1;
1560 unsigned int apic_lvterr;
1561 unsigned int apic_tmict;
1562 unsigned int apic_tdcr;
1563 unsigned int apic_thmr;
1564 } apic_pm_state;
1565
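/* Save the local APIC registers and disable the APIC before suspend */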
1566 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
1567 {
1568 unsigned long flags;
1569 int maxlvt;
1570
1571 if (!apic_pm_state.active)
1572 return 0;
1573
1574 maxlvt = lapic_get_maxlvt();
1575
1576 apic_pm_state.apic_id = apic_read(APIC_ID);
1577 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
1578 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
1579 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
1580 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
1581 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
1582 if (maxlvt >= 4)
1583 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
1584 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
1585 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
1586 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
1587 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
1588 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
1589 #ifdef CONFIG_X86_MCE_P4THERMAL
1590 if (maxlvt >= 5)
1591 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
1592 #endif
1593
1594 local_irq_save(flags);
1595 disable_local_APIC();
1596 local_irq_restore(flags);
1597 return 0;
1598 }
1599
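/* Re-enable the APIC via MSR_IA32_APICBASE and restore the saved register state */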
1600 static int lapic_resume(struct sys_device *dev)
1601 {
1602 unsigned int l, h;
1603 unsigned long flags;
1604 int maxlvt;
1605
1606 if (!apic_pm_state.active)
1607 return 0;
1608
1609 maxlvt = lapic_get_maxlvt();
1610
1611 local_irq_save(flags);
1612
1613 /*
1614 * Make sure the APICBASE points to the right address
1615 *
1616 * FIXME! This will be wrong if we ever support suspend on
1617 * SMP! We'll need to do this as part of the CPU restore!
1618 */
1619 rdmsr(MSR_IA32_APICBASE, l, h);
1620 l &= ~MSR_IA32_APICBASE_BASE;
1621 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
1622 wrmsr(MSR_IA32_APICBASE, l, h);
1623
1624 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
1625 apic_write(APIC_ID, apic_pm_state.apic_id);
1626 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
1627 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
1628 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
1629 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
1630 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
1631 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
1632 #ifdef CONFIG_X86_MCE_P4THERMAL
1633 if (maxlvt >= 5)
1634 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
1635 #endif
1636 if (maxlvt >= 4)
1637 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
1638 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
1639 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
1640 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
1641 apic_write(APIC_ESR, 0);
1642 apic_read(APIC_ESR);
1643 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
1644 apic_write(APIC_ESR, 0);
1645 apic_read(APIC_ESR);
1646 local_irq_restore(flags);
1647 return 0;
1648 }
1649
1650 /*
1651 * This device has no shutdown method - fully functioning local APICs
1652 * are needed on every CPU up until machine_halt/restart/poweroff.
1653 */
1654
1655 static struct sysdev_class lapic_sysclass = {
1656 .name = "lapic",
1657 .resume = lapic_resume,
1658 .suspend = lapic_suspend,
1659 };
1660
1661 static struct sys_device device_lapic = {
1662 .id = 0,
1663 .cls = &lapic_sysclass,
1664 };
1665
1666 static void __devinit apic_pm_activate(void)
1667 {
1668 apic_pm_state.active = 1;
1669 }
1670
1671 static int __init init_lapic_sysfs(void)
1672 {
1673 int error;
1674
1675 if (!cpu_has_apic)
1676 return 0;
1677 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
1678
1679 error = sysdev_class_register(&lapic_sysclass);
1680 if (!error)
1681 error = sysdev_register(&device_lapic);
1682 return error;
1683 }
1684 device_initcall(init_lapic_sysfs);
1685
1686 #else /* CONFIG_PM */
1687
1688 static void apic_pm_activate(void) { }
1689
1690 #endif /* CONFIG_PM */
1691
1692 /*
1693 * APIC command line parameters
1694 */
1695 static int __init parse_lapic(char *arg)
1696 {
1697 force_enable_local_apic = 1;
1698 return 0;
1699 }
1700 early_param("lapic", parse_lapic);
1701
1702 static int __init parse_nolapic(char *arg)
1703 {
1704 disable_apic = 1;
1705 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1706 return 0;
1707 }
1708 early_param("nolapic", parse_nolapic);
1709
1710 static int __init parse_disable_lapic_timer(char *arg)
1711 {
1712 local_apic_timer_disabled = 1;
1713 return 0;
1714 }
1715 early_param("nolapic_timer", parse_disable_lapic_timer);
1716
1717 static int __init parse_lapic_timer_c2_ok(char *arg)
1718 {
1719 local_apic_timer_c2_ok = 1;
1720 return 0;
1721 }
1722 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1723
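/* "apic=verbose" and "apic=debug" raise the APIC debug verbosity */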
1724 static int __init apic_set_verbosity(char *str)
1725 {
1726 if (strcmp("debug", str) == 0)
1727 apic_verbosity = APIC_DEBUG;
1728 else if (strcmp("verbose", str) == 0)
1729 apic_verbosity = APIC_VERBOSE;
1730 return 1;
1731 }
1732 __setup("apic=", apic_set_verbosity);
1733
1734 static int __init lapic_insert_resource(void)
1735 {
1736 if (!apic_phys)
1737 return -1;
1738
1739 /* Put local APIC into the resource map. */
1740 lapic_resource.start = apic_phys;
1741 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
1742 insert_resource(&iomem_resource, &lapic_resource);
1743
1744 return 0;
1745 }
1746
1747 /*
1748  * We need to call insert_resource() after e820_reserve_resources(),
1749  * which uses request_resource().
1750 */
1751 late_initcall(lapic_insert_resource);