x86: cleanup early per cpu variables/accesses v4
arch/x86/kernel/apic_32.c
1 /*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17 #include <linux/init.h>
18
19 #include <linux/mm.h>
20 #include <linux/delay.h>
21 #include <linux/bootmem.h>
22 #include <linux/interrupt.h>
23 #include <linux/mc146818rtc.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/sysdev.h>
26 #include <linux/cpu.h>
27 #include <linux/clockchips.h>
28 #include <linux/acpi_pmtmr.h>
29 #include <linux/module.h>
30 #include <linux/dmi.h>
31
32 #include <asm/atomic.h>
33 #include <asm/smp.h>
34 #include <asm/mtrr.h>
35 #include <asm/mpspec.h>
36 #include <asm/desc.h>
37 #include <asm/arch_hooks.h>
38 #include <asm/hpet.h>
39 #include <asm/i8253.h>
40 #include <asm/nmi.h>
41
42 #include <mach_apic.h>
43 #include <mach_apicdef.h>
44 #include <mach_ipi.h>
45
46 /*
47 * Sanity check
48 */
49 #if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
50 # error SPURIOUS_APIC_VECTOR definition error
51 #endif
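/*
 * Background (per Intel's documentation): Pentium/P6-class local APICs
 * hardwire the low four bits of the spurious-interrupt vector register to 1,
 * so the vector we define must end in 0xF for what we program to match what
 * the hardware actually uses.
 */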
52
53 unsigned long mp_lapic_addr;
54
55 /*
56 * Knob to control our willingness to enable the local APIC.
57 *
58 * -1=force-disable, +1=force-enable
59 */
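/* Set to 1 by the "lapic" and to -1 by the "nolapic" early params
   defined at the end of this file. */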
60 static int enable_local_apic __initdata;
61
62 /* Local APIC timer verification ok */
63 static int local_apic_timer_verify_ok;
64 /* Disable local APIC timer from the kernel command line, via DMI quirk,
65 or via CPU MSR check */
66 int local_apic_timer_disabled;
67 /* Local APIC timer works in C2 */
68 int local_apic_timer_c2_ok;
69 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
70
71 /*
72 * Debug level, exported for io_apic.c
73 */
74 int apic_verbosity;
75
76 static unsigned int calibration_result;
77
78 static int lapic_next_event(unsigned long delta,
79 struct clock_event_device *evt);
80 static void lapic_timer_setup(enum clock_event_mode mode,
81 struct clock_event_device *evt);
82 static void lapic_timer_broadcast(cpumask_t mask);
83 static void apic_pm_activate(void);
84
85 /*
86 * The local apic timer can be used for any function which is CPU local.
87 */
88 static struct clock_event_device lapic_clockevent = {
89 .name = "lapic",
90 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
91 | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
92 .shift = 32,
93 .set_mode = lapic_timer_setup,
94 .set_next_event = lapic_next_event,
95 .broadcast = lapic_timer_broadcast,
96 .rating = 100,
97 .irq = -1,
98 };
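/*
 * Per-CPU clock event device: setup_APIC_timer() copies lapic_clockevent
 * into it and registers it with the clockevents framework on each CPU.
 */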
99 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
100
101 /* Local APIC was disabled by the BIOS and enabled by the kernel */
102 static int enabled_via_apicbase;
103
104 static unsigned long apic_phys;
105
106 /*
107 * Get the LAPIC version
108 */
109 static inline int lapic_get_version(void)
110 {
111 return GET_APIC_VERSION(apic_read(APIC_LVR));
112 }
113
114 /*
115 * Check, if the APIC is integrated or a separate chip
116 */
117 static inline int lapic_is_integrated(void)
118 {
119 return APIC_INTEGRATED(lapic_get_version());
120 }
121
122 /*
123 * Check, whether this is a modern or a first generation APIC
124 */
125 static int modern_apic(void)
126 {
127 /* AMD systems use old APIC versions, so check the CPU */
128 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
129 boot_cpu_data.x86 >= 0xf)
130 return 1;
131 return lapic_get_version() >= 0x14;
132 }
133
134 void apic_wait_icr_idle(void)
135 {
136 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
137 cpu_relax();
138 }
139
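/*
 * Bounded variant of apic_wait_icr_idle(): poll the ICR busy bit for at
 * most 1000 * 100us (~100ms) and return the last observed busy state, so
 * callers can detect a stuck ICR instead of spinning forever.
 */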
140 u32 safe_apic_wait_icr_idle(void)
141 {
142 u32 send_status;
143 int timeout;
144
145 timeout = 0;
146 do {
147 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
148 if (!send_status)
149 break;
150 udelay(100);
151 } while (timeout++ < 1000);
152
153 return send_status;
154 }
155
156 /**
157 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
158 */
159 void __cpuinit enable_NMI_through_LVT0(void)
160 {
161 unsigned int v = APIC_DM_NMI;
162
163 /* Level triggered for 82489DX */
164 if (!lapic_is_integrated())
165 v |= APIC_LVT_LEVEL_TRIGGER;
166 apic_write_around(APIC_LVT0, v);
167 }
168
169 /**
170 * get_physical_broadcast - Get number of physical broadcast IDs
171 */
172 int get_physical_broadcast(void)
173 {
174 return modern_apic() ? 0xff : 0xf;
175 }
176
177 /**
178 * lapic_get_maxlvt - get the maximum number of local vector table entries
179 */
180 int lapic_get_maxlvt(void)
181 {
182 unsigned int v = apic_read(APIC_LVR);
183
184 /* 82489DXs do not report # of LVT entries. */
185 return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
186 }
187
188 /*
189 * Local APIC timer
190 */
191
192 /* Clock divisor is set to 16 */
193 #define APIC_DIVISOR 16
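/*
 * APIC_DIVISOR matches the divide-by-16 value programmed into APIC_TDCR in
 * __setup_APIC_LVTT() below, so counts handed to that function in bus
 * clocks are divided by 16 before being written to APIC_TMICT.
 */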
194
195 /*
196 * This function sets up the local APIC timer, with a timeout of
197 * 'clocks' APIC bus clocks. During calibration we actually call
198 * this function twice on the boot CPU, once with a bogus timeout
199 * value and a second time with the real one. The other (noncalibrating) CPUs
200 * call this function only once, with the real, calibrated value.
201 *
202 * We do reads before writes even if unnecessary, to get around the
203 * P5 APIC double write bug.
204 */
205 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
206 {
207 unsigned int lvtt_value, tmp_value;
208
209 lvtt_value = LOCAL_TIMER_VECTOR;
210 if (!oneshot)
211 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
212 if (!lapic_is_integrated())
213 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
214
215 if (!irqen)
216 lvtt_value |= APIC_LVT_MASKED;
217
218 apic_write_around(APIC_LVTT, lvtt_value);
219
220 /*
221 * Divide PICLK by 16
222 */
223 tmp_value = apic_read(APIC_TDCR);
224 apic_write_around(APIC_TDCR, (tmp_value
225 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
226 | APIC_TDR_DIV_16);
227
228 if (!oneshot)
229 apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
230 }
231
232 /*
233 * Program the next event, relative to now
234 */
235 static int lapic_next_event(unsigned long delta,
236 struct clock_event_device *evt)
237 {
238 apic_write_around(APIC_TMICT, delta);
239 return 0;
240 }
241
242 /*
243 * Setup the lapic timer in periodic or oneshot mode
244 */
245 static void lapic_timer_setup(enum clock_event_mode mode,
246 struct clock_event_device *evt)
247 {
248 unsigned long flags;
249 unsigned int v;
250
251 /* Lapic used for broadcast ? */
252 if (!local_apic_timer_verify_ok)
253 return;
254
255 local_irq_save(flags);
256
257 switch (mode) {
258 case CLOCK_EVT_MODE_PERIODIC:
259 case CLOCK_EVT_MODE_ONESHOT:
260 __setup_APIC_LVTT(calibration_result,
261 mode != CLOCK_EVT_MODE_PERIODIC, 1);
262 break;
263 case CLOCK_EVT_MODE_UNUSED:
264 case CLOCK_EVT_MODE_SHUTDOWN:
265 v = apic_read(APIC_LVTT);
266 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
267 apic_write_around(APIC_LVTT, v);
268 break;
269 case CLOCK_EVT_MODE_RESUME:
270 /* Nothing to do here */
271 break;
272 }
273
274 local_irq_restore(flags);
275 }
276
277 /*
278 * Local APIC timer broadcast function
279 */
280 static void lapic_timer_broadcast(cpumask_t mask)
281 {
282 #ifdef CONFIG_SMP
283 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
284 #endif
285 }
286
287 /*
288 * Setup the local APIC timer for this CPU. Copy the initialized values
289 * of the boot CPU and register the clock event in the framework.
290 */
291 static void __devinit setup_APIC_timer(void)
292 {
293 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
294
295 memcpy(levt, &lapic_clockevent, sizeof(*levt));
296 levt->cpumask = cpumask_of_cpu(smp_processor_id());
297
298 clockevents_register_device(levt);
299 }
300
301 /*
302 * In this function we calibrate the APIC bus clocks to the external timer.
303 *
304 * We want to do the calibration only once since we want to have local timer
305 * irqs in sync. CPUs connected by the same APIC bus have the very same bus
306 * frequency.
307 *
308 * This was previously done by reading the PIT/HPET and waiting for a wrap
309 * around to find out that a tick has elapsed. I have a box where the PIT
310 * readout is broken, so it never gets out of the wait loop again. This was
311 * also reported by others.
312 *
313 * Monitoring the jiffies value is inaccurate and the clockevents
314 * infrastructure allows us to do a simple substitution of the interrupt
315 * handler.
316 *
317 * The calibration routine also uses the pm_timer when possible, as the PIT
318 * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
319 * back to normal later in the boot process).
320 */
321
322 #define LAPIC_CAL_LOOPS (HZ/10)
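/*
 * Worked example with hypothetical numbers: at HZ=250, LAPIC_CAL_LOOPS is
 * 25 ticks, i.e. 100ms. With a 200 MHz bus clock and the divide-by-16
 * setup, the APIC counter decrements by delta = 200000000 / 16 / 10 =
 * 1250000 in that window, so calibration_result = delta * APIC_DIVISOR /
 * LAPIC_CAL_LOOPS = 800000 bus clocks per tick, which the code below
 * reports as 800000 / (1000000 / HZ) = 200.0000 MHz host bus clock.
 */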
323
324 static __initdata int lapic_cal_loops = -1;
325 static __initdata long lapic_cal_t1, lapic_cal_t2;
326 static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
327 static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
328 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
329
330 /*
331 * Temporary interrupt handler.
332 */
333 static void __init lapic_cal_handler(struct clock_event_device *dev)
334 {
335 unsigned long long tsc = 0;
336 long tapic = apic_read(APIC_TMCCT);
337 unsigned long pm = acpi_pm_read_early();
338
339 if (cpu_has_tsc)
340 rdtscll(tsc);
341
342 switch (lapic_cal_loops++) {
343 case 0:
344 lapic_cal_t1 = tapic;
345 lapic_cal_tsc1 = tsc;
346 lapic_cal_pm1 = pm;
347 lapic_cal_j1 = jiffies;
348 break;
349
350 case LAPIC_CAL_LOOPS:
351 lapic_cal_t2 = tapic;
352 lapic_cal_tsc2 = tsc;
353 if (pm < lapic_cal_pm1)
354 pm += ACPI_PM_OVRRUN;
355 lapic_cal_pm2 = pm;
356 lapic_cal_j2 = jiffies;
357 break;
358 }
359 }
360
361 /*
362 * Setup the boot APIC
363 *
364 * Calibrate and verify the result.
365 */
366 void __init setup_boot_APIC_clock(void)
367 {
368 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
369 const long pm_100ms = PMTMR_TICKS_PER_SEC/10;
370 const long pm_thresh = pm_100ms/100;
371 void (*real_handler)(struct clock_event_device *dev);
372 unsigned long deltaj;
373 long delta, deltapm;
374 int pm_referenced = 0;
375
376 /*
377 * The local apic timer can be disabled via the kernel
378 * commandline or from the CPU detection code. Register the lapic
379 * timer as a dummy clock event source on SMP systems, so the
380 * broadcast mechanism is used. On UP systems simply ignore it.
381 */
382 if (local_apic_timer_disabled) {
383 /* No broadcast on UP ! */
384 if (num_possible_cpus() > 1) {
385 lapic_clockevent.mult = 1;
386 setup_APIC_timer();
387 }
388 return;
389 }
390
391 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
392 "calibrating APIC timer ...\n");
393
394 local_irq_disable();
395
396 /* Replace the global interrupt handler */
397 real_handler = global_clock_event->event_handler;
398 global_clock_event->event_handler = lapic_cal_handler;
399
400 /*
401 * Setup the APIC counter to 1e9. There is no way the lapic
402 * can underflow in the 100ms detection time frame
403 */
404 __setup_APIC_LVTT(1000000000, 0, 0);
405
406 /* Let the interrupts run */
407 local_irq_enable();
408
409 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
410 cpu_relax();
411
412 local_irq_disable();
413
414 /* Restore the real event handler */
415 global_clock_event->event_handler = real_handler;
416
417 /* Build delta t1-t2 as apic timer counts down */
418 delta = lapic_cal_t1 - lapic_cal_t2;
419 apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
420
421 /* Check, if the PM timer is available */
422 deltapm = lapic_cal_pm2 - lapic_cal_pm1;
423 apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
424
425 if (deltapm) {
426 unsigned long mult;
427 u64 res;
428
429 mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
430
431 if (deltapm > (pm_100ms - pm_thresh) &&
432 deltapm < (pm_100ms + pm_thresh)) {
433 apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
434 } else {
435 res = (((u64) deltapm) * mult) >> 22;
436 do_div(res, 1000000);
437 printk(KERN_WARNING "APIC calibration not consistent "
438 "with PM Timer: %ldms instead of 100ms\n",
439 (long)res);
440 /* Correct the lapic counter value */
441 res = (((u64) delta) * pm_100ms);
442 do_div(res, deltapm);
443 printk(KERN_INFO "APIC delta adjusted to PM-Timer: "
444 "%lu (%ld)\n", (unsigned long) res, delta);
445 delta = (long) res;
446 }
447 pm_referenced = 1;
448 }
449
450 /* Calculate the scaled math multiplication factor */
451 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
452 lapic_clockevent.shift);
453 lapic_clockevent.max_delta_ns =
454 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
455 lapic_clockevent.min_delta_ns =
456 clockevent_delta2ns(0xF, &lapic_clockevent);
457
458 calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
459
460 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
461 apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult);
462 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
463 calibration_result);
464
465 if (cpu_has_tsc) {
466 delta = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
467 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
468 "%ld.%04ld MHz.\n",
469 (delta / LAPIC_CAL_LOOPS) / (1000000 / HZ),
470 (delta / LAPIC_CAL_LOOPS) % (1000000 / HZ));
471 }
472
473 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
474 "%u.%04u MHz.\n",
475 calibration_result / (1000000 / HZ),
476 calibration_result % (1000000 / HZ));
477
478 local_apic_timer_verify_ok = 1;
479
480 /*
481 * Do a sanity check on the APIC calibration result
482 */
483 if (calibration_result < (1000000 / HZ)) {
484 local_irq_enable();
485 printk(KERN_WARNING
486 "APIC frequency too slow, disabling apic timer\n");
487 /* No broadcast on UP ! */
488 if (num_possible_cpus() > 1)
489 setup_APIC_timer();
490 return;
491 }
492
493 /* We trust the pm timer based calibration */
494 if (!pm_referenced) {
495 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
496
497 /*
498 * Setup the apic timer manually
499 */
500 levt->event_handler = lapic_cal_handler;
501 lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
502 lapic_cal_loops = -1;
503
504 /* Let the interrupts run */
505 local_irq_enable();
506
507 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
508 cpu_relax();
509
510 local_irq_disable();
511
512 /* Stop the lapic timer */
513 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
514
515 local_irq_enable();
516
517 /* Jiffies delta */
518 deltaj = lapic_cal_j2 - lapic_cal_j1;
519 apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
520
521 /* Check, if the jiffies result is consistent */
522 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
523 apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
524 else
525 local_apic_timer_verify_ok = 0;
526 } else
527 local_irq_enable();
528
529 if (!local_apic_timer_verify_ok) {
530 printk(KERN_WARNING
531 "APIC timer disabled due to verification failure.\n");
532 /* No broadcast on UP ! */
533 if (num_possible_cpus() == 1)
534 return;
535 } else {
536 /*
537 * If nmi_watchdog is set to IO_APIC, we need the
538 * PIT/HPET going. Otherwise register lapic as a dummy
539 * device.
540 */
541 if (nmi_watchdog != NMI_IO_APIC)
542 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
543 else
544 printk(KERN_WARNING "APIC timer registered as dummy,"
545 " due to nmi_watchdog=1!\n");
546 }
547
548 /* Setup the lapic or request the broadcast */
549 setup_APIC_timer();
550 }
551
552 void __devinit setup_secondary_APIC_clock(void)
553 {
554 setup_APIC_timer();
555 }
556
557 /*
558 * The guts of the apic timer interrupt
559 */
560 static void local_apic_timer_interrupt(void)
561 {
562 int cpu = smp_processor_id();
563 struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
564
565 /*
566 * Normally we should not be here till LAPIC has been initialized but
567 * in some cases, like kdump, it's possible that there is a pending LAPIC
568 * timer interrupt from the previous kernel's context which is delivered in
569 * the new kernel the moment interrupts are enabled.
570 *
571 * Interrupts are enabled early and the LAPIC is set up much later, hence
572 * it's possible that when we get here evt->event_handler is NULL.
573 * Check for event_handler being NULL and discard the interrupt as
574 * spurious.
575 */
576 if (!evt->event_handler) {
577 printk(KERN_WARNING
578 "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
579 /* Switch it off */
580 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
581 return;
582 }
583
584 /*
585 * the NMI deadlock-detector uses this.
586 */
587 per_cpu(irq_stat, cpu).apic_timer_irqs++;
588
589 evt->event_handler(evt);
590 }
591
592 /*
593 * Local APIC timer interrupt. This is the most natural way for doing
594 * local interrupts, but local timer interrupts can be emulated by
595 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
596 *
597 * [ if a single-CPU system runs an SMP kernel then we call the local
598 * interrupt as well. Thus we cannot inline the local irq ... ]
599 */
600 void smp_apic_timer_interrupt(struct pt_regs *regs)
601 {
602 struct pt_regs *old_regs = set_irq_regs(regs);
603
604 /*
605 * NOTE! We'd better ACK the irq immediately,
606 * because timer handling can be slow.
607 */
608 ack_APIC_irq();
609 /*
610 * update_process_times() expects us to have done irq_enter().
611 * Besides, if we don't, timer interrupts ignore the global
612 * interrupt lock, which is the WrongThing (tm) to do.
613 */
614 irq_enter();
615 local_apic_timer_interrupt();
616 irq_exit();
617
618 set_irq_regs(old_regs);
619 }
620
621 int setup_profiling_timer(unsigned int multiplier)
622 {
623 return -EINVAL;
624 }
625
626 /*
627 * Setup extended LVT, AMD specific (K8, family 10h)
628 *
629 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
630 * MCE interrupts are supported. Thus MCE offset must be set to 0.
631 */
632
633 #define APIC_EILVT_LVTOFF_MCE 0
634 #define APIC_EILVT_LVTOFF_IBS 1
635
636 static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
637 {
638 unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
639 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
640 apic_write(reg, v);
641 }
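/*
 * For illustration: with lvt_off == APIC_EILVT_LVTOFF_IBS (1) the register
 * written is APIC_EILVT0 + 0x10, and the value packs the fields as
 * (mask << 16) | (msg_type << 8) | vector, mirroring the regular LVT
 * layout.
 */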
642
643 u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
644 {
645 setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
646 return APIC_EILVT_LVTOFF_MCE;
647 }
648
649 u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
650 {
651 setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
652 return APIC_EILVT_LVTOFF_IBS;
653 }
654
655 /*
656 * Local APIC start and shutdown
657 */
658
659 /**
660 * clear_local_APIC - shutdown the local APIC
661 *
662 * This is called when a CPU is disabled and before rebooting, so the state of
663 * the local APIC has no dangling leftovers. Also used to clean out any BIOS
664 * leftovers during boot.
665 */
666 void clear_local_APIC(void)
667 {
668 int maxlvt;
669 u32 v;
670
671 /* APIC hasn't been mapped yet */
672 if (!apic_phys)
673 return;
674
675 maxlvt = lapic_get_maxlvt();
676 /*
677 * Masking an LVT entry can trigger a local APIC error
678 * if the vector is zero. Mask LVTERR first to prevent this.
679 */
680 if (maxlvt >= 3) {
681 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
682 apic_write_around(APIC_LVTERR, v | APIC_LVT_MASKED);
683 }
684 /*
685 * Careful: we have to set masks only first to deassert
686 * any level-triggered sources.
687 */
688 v = apic_read(APIC_LVTT);
689 apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
690 v = apic_read(APIC_LVT0);
691 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
692 v = apic_read(APIC_LVT1);
693 apic_write_around(APIC_LVT1, v | APIC_LVT_MASKED);
694 if (maxlvt >= 4) {
695 v = apic_read(APIC_LVTPC);
696 apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
697 }
698
699 /* let's not touch this if we didn't frob it */
700 #ifdef CONFIG_X86_MCE_P4THERMAL
701 if (maxlvt >= 5) {
702 v = apic_read(APIC_LVTTHMR);
703 apic_write_around(APIC_LVTTHMR, v | APIC_LVT_MASKED);
704 }
705 #endif
706 /*
707 * Clean APIC state for other OSs:
708 */
709 apic_write_around(APIC_LVTT, APIC_LVT_MASKED);
710 apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
711 apic_write_around(APIC_LVT1, APIC_LVT_MASKED);
712 if (maxlvt >= 3)
713 apic_write_around(APIC_LVTERR, APIC_LVT_MASKED);
714 if (maxlvt >= 4)
715 apic_write_around(APIC_LVTPC, APIC_LVT_MASKED);
716
717 #ifdef CONFIG_X86_MCE_P4THERMAL
718 if (maxlvt >= 5)
719 apic_write_around(APIC_LVTTHMR, APIC_LVT_MASKED);
720 #endif
721 /* Integrated APIC (!82489DX) ? */
722 if (lapic_is_integrated()) {
723 if (maxlvt > 3)
724 /* Clear ESR due to Pentium errata 3AP and 11AP */
725 apic_write(APIC_ESR, 0);
726 apic_read(APIC_ESR);
727 }
728 }
729
730 /**
731 * disable_local_APIC - clear and disable the local APIC
732 */
733 void disable_local_APIC(void)
734 {
735 unsigned long value;
736
737 clear_local_APIC();
738
739 /*
740 * Disable APIC (implies clearing of registers
741 * for 82489DX!).
742 */
743 value = apic_read(APIC_SPIV);
744 value &= ~APIC_SPIV_APIC_ENABLED;
745 apic_write_around(APIC_SPIV, value);
746
747 /*
748 * When LAPIC was disabled by the BIOS and enabled by the kernel,
749 * restore the disabled state.
750 */
751 if (enabled_via_apicbase) {
752 unsigned int l, h;
753
754 rdmsr(MSR_IA32_APICBASE, l, h);
755 l &= ~MSR_IA32_APICBASE_ENABLE;
756 wrmsr(MSR_IA32_APICBASE, l, h);
757 }
758 }
759
760 /*
761 * If Linux enabled the LAPIC against the BIOS default, shut it down before
762 * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
763 * not power-off. Additionally clear all LVT entries before disable_local_APIC
764 * for the case where Linux didn't enable the LAPIC.
765 */
766 void lapic_shutdown(void)
767 {
768 unsigned long flags;
769
770 if (!cpu_has_apic)
771 return;
772
773 local_irq_save(flags);
774 clear_local_APIC();
775
776 if (enabled_via_apicbase)
777 disable_local_APIC();
778
779 local_irq_restore(flags);
780 }
781
782 /*
783 * This is to verify that we're looking at a real local APIC.
784 * Check these against your board if the CPUs aren't getting
785 * started for no apparent reason.
786 */
787 int __init verify_local_APIC(void)
788 {
789 unsigned int reg0, reg1;
790
791 /*
792 * The version register is read-only in a real APIC.
793 */
794 reg0 = apic_read(APIC_LVR);
795 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
796 apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
797 reg1 = apic_read(APIC_LVR);
798 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
799
800 /*
801 * The two version reads above should print the same
802 * numbers. If the second one is different, then we
803 * poke at a non-APIC.
804 */
805 if (reg1 != reg0)
806 return 0;
807
808 /*
809 * Check if the version looks reasonable.
810 */
811 reg1 = GET_APIC_VERSION(reg0);
812 if (reg1 == 0x00 || reg1 == 0xff)
813 return 0;
814 reg1 = lapic_get_maxlvt();
815 if (reg1 < 0x02 || reg1 == 0xff)
816 return 0;
817
818 /*
819 * The ID register is read/write in a real APIC.
820 */
821 reg0 = apic_read(APIC_ID);
822 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
823
824 /*
825 * The next two are just to see if we have sane values.
826 * They're only really relevant if we're in Virtual Wire
827 * compatibility mode, but most boxes are these days.
828 */
829 reg0 = apic_read(APIC_LVT0);
830 apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
831 reg1 = apic_read(APIC_LVT1);
832 apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
833
834 return 1;
835 }
836
837 /**
838 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
839 */
840 void __init sync_Arb_IDs(void)
841 {
842 /*
843 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1. Also not
844 * needed on AMD.
845 */
846 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
847 return;
848 /*
849 * Wait for idle.
850 */
851 apic_wait_icr_idle();
852
853 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
854 apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
855 | APIC_DM_INIT);
856 }
857
858 /*
859 * An initial setup of the virtual wire mode.
860 */
861 void __init init_bsp_APIC(void)
862 {
863 unsigned long value;
864
865 /*
866 * Don't do the setup now if we have an SMP BIOS as the
867 * through-I/O-APIC virtual wire mode might be active.
868 */
869 if (smp_found_config || !cpu_has_apic)
870 return;
871
872 /*
873 * Do not trust the local APIC being empty at bootup.
874 */
875 clear_local_APIC();
876
877 /*
878 * Enable APIC.
879 */
880 value = apic_read(APIC_SPIV);
881 value &= ~APIC_VECTOR_MASK;
882 value |= APIC_SPIV_APIC_ENABLED;
883
884 /* This bit is reserved on P4/Xeon and should be cleared */
885 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
886 (boot_cpu_data.x86 == 15))
887 value &= ~APIC_SPIV_FOCUS_DISABLED;
888 else
889 value |= APIC_SPIV_FOCUS_DISABLED;
890 value |= SPURIOUS_APIC_VECTOR;
891 apic_write_around(APIC_SPIV, value);
892
893 /*
894 * Set up the virtual wire mode.
895 */
896 apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
897 value = APIC_DM_NMI;
898 if (!lapic_is_integrated()) /* 82489DX */
899 value |= APIC_LVT_LEVEL_TRIGGER;
900 apic_write_around(APIC_LVT1, value);
901 }
902
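/*
 * Enable local APIC error reporting: unmask LVTERR with ERROR_APIC_VECTOR
 * and clear the ESR, unless this is an 82489DX (which has no ESR) or
 * esr_disable is set.
 */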
903 static void __cpuinit lapic_setup_esr(void)
904 {
905 unsigned long oldvalue, value, maxlvt;
906 if (lapic_is_integrated() && !esr_disable) {
907 /* !82489DX */
908 maxlvt = lapic_get_maxlvt();
909 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
910 apic_write(APIC_ESR, 0);
911 oldvalue = apic_read(APIC_ESR);
912
913 /* enables sending errors */
914 value = ERROR_APIC_VECTOR;
915 apic_write_around(APIC_LVTERR, value);
916 /*
917 * spec says clear errors after enabling vector.
918 */
919 if (maxlvt > 3)
920 apic_write(APIC_ESR, 0);
921 value = apic_read(APIC_ESR);
922 if (value != oldvalue)
923 apic_printk(APIC_VERBOSE, "ESR value before enabling "
924 "vector: 0x%08lx after: 0x%08lx\n",
925 oldvalue, value);
926 } else {
927 if (esr_disable)
928 /*
929 * Something untraceable is creating bad interrupts on
930 * secondary quads ... for the moment, just leave the
931 * ESR disabled - we can't do anything useful with the
932 * errors anyway - mbligh
933 */
934 printk(KERN_INFO "Leaving ESR disabled.\n");
935 else
936 printk(KERN_INFO "No ESR for 82489DX.\n");
937 }
938 }
939
940
941 /**
942 * setup_local_APIC - setup the local APIC
943 */
944 void __cpuinit setup_local_APIC(void)
945 {
946 unsigned long value, integrated;
947 int i, j;
948
949 /* Pound the ESR really hard over the head with a big hammer - mbligh */
950 if (esr_disable) {
951 apic_write(APIC_ESR, 0);
952 apic_write(APIC_ESR, 0);
953 apic_write(APIC_ESR, 0);
954 apic_write(APIC_ESR, 0);
955 }
956
957 integrated = lapic_is_integrated();
958
959 /*
960 * Double-check whether this APIC is really registered.
961 */
962 if (!apic_id_registered())
963 BUG();
964
965 /*
966 * Intel recommends to set DFR, LDR and TPR before enabling
967 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
968 * document number 292116). So here it goes...
969 */
970 init_apic_ldr();
971
972 /*
973 * Set Task Priority to 'accept all'. We never change this
974 * later on.
975 */
976 value = apic_read(APIC_TASKPRI);
977 value &= ~APIC_TPRI_MASK;
978 apic_write_around(APIC_TASKPRI, value);
979
980 /*
981 * After a crash, we no longer service the interrupts and a pending
982 * interrupt from previous kernel might still have ISR bit set.
983 *
984 * Most probably by now the CPU has serviced that pending interrupt and
985 * might not have done the ack_APIC_irq() because it thought the
986 * interrupt came from the i8259 as an ExtInt. The LAPIC did not get an EOI
987 * so it does not clear the ISR bit and the CPU thinks it has already serviced
988 * the interrupt. Hence a vector might get locked. It was noticed
989 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
990 */
991 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
992 value = apic_read(APIC_ISR + i*0x10);
993 for (j = 31; j >= 0; j--) {
994 if (value & (1<<j))
995 ack_APIC_irq();
996 }
997 }
998
999 /*
1000 * Now that we are all set up, enable the APIC
1001 */
1002 value = apic_read(APIC_SPIV);
1003 value &= ~APIC_VECTOR_MASK;
1004 /*
1005 * Enable APIC
1006 */
1007 value |= APIC_SPIV_APIC_ENABLED;
1008
1009 /*
1010 * Some unknown Intel IO/APIC (or APIC) erratum is biting us with
1011 * certain networking cards. If high frequency interrupts are
1012 * happening on a particular IOAPIC pin, plus the IOAPIC routing
1013 * entry is masked/unmasked at a high rate as well then sooner or
1014 * later IOAPIC line gets 'stuck', no more interrupts are received
1015 * from the device. If focus CPU is disabled then the hang goes
1016 * away, oh well :-(
1017 *
1018 * [ This bug can be reproduced easily with level-triggered
1019 * PCI Ne2000 networking cards and PII/PIII processors on a dual
1020 * BX chipset. ]
1021 */
1022 /*
1023 * Actually disabling the focus CPU check just makes the hang less
1024 * frequent as it makes the interrupt distribution model more
1025 * like LRU than MRU (the short-term load is more even across CPUs).
1026 * See also the comment in end_level_ioapic_irq(). --macro
1027 */
1028
1029 /* Enable focus processor (bit==0) */
1030 value &= ~APIC_SPIV_FOCUS_DISABLED;
1031
1032 /*
1033 * Set spurious IRQ vector
1034 */
1035 value |= SPURIOUS_APIC_VECTOR;
1036 apic_write_around(APIC_SPIV, value);
1037
1038 /*
1039 * Set up LVT0, LVT1:
1040 *
1041 * set up through-local-APIC on the BP's LINT0. This is not
1042 * strictly necessary in pure symmetric-IO mode, but sometimes
1043 * we delegate interrupts to the 8259A.
1044 */
1045 /*
1046 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
1047 */
1048 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1049 if (!smp_processor_id() && (pic_mode || !value)) {
1050 value = APIC_DM_EXTINT;
1051 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
1052 smp_processor_id());
1053 } else {
1054 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
1055 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
1056 smp_processor_id());
1057 }
1058 apic_write_around(APIC_LVT0, value);
1059
1060 /*
1061 * only the BP should see the LINT1 NMI signal, obviously.
1062 */
1063 if (!smp_processor_id())
1064 value = APIC_DM_NMI;
1065 else
1066 value = APIC_DM_NMI | APIC_LVT_MASKED;
1067 if (!integrated) /* 82489DX */
1068 value |= APIC_LVT_LEVEL_TRIGGER;
1069 apic_write_around(APIC_LVT1, value);
1070 }
1071
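/*
 * Finish per-CPU APIC setup: enable error reporting, keep the local timer
 * LVT masked until the clockevent code programs it, and activate the NMI
 * watchdog and APIC power management.
 */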
1072 void __cpuinit end_local_APIC_setup(void)
1073 {
1074 unsigned long value;
1075
1076 lapic_setup_esr();
1077 /* Disable the local apic timer */
1078 value = apic_read(APIC_LVTT);
1079 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1080 apic_write_around(APIC_LVTT, value);
1081
1082 setup_apic_nmi_watchdog(NULL);
1083 apic_pm_activate();
1084 }
1085
1086 /*
1087 * Detect and initialize APIC
1088 */
1089 static int __init detect_init_APIC(void)
1090 {
1091 u32 h, l, features;
1092
1093 /* Disabled by kernel option? */
1094 if (enable_local_apic < 0)
1095 return -1;
1096
1097 switch (boot_cpu_data.x86_vendor) {
1098 case X86_VENDOR_AMD:
1099 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
1100 (boot_cpu_data.x86 == 15))
1101 break;
1102 goto no_apic;
1103 case X86_VENDOR_INTEL:
1104 if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
1105 (boot_cpu_data.x86 == 5 && cpu_has_apic))
1106 break;
1107 goto no_apic;
1108 default:
1109 goto no_apic;
1110 }
1111
1112 if (!cpu_has_apic) {
1113 /*
1114 * Over-ride BIOS and try to enable the local APIC only if
1115 * "lapic" specified.
1116 */
1117 if (enable_local_apic <= 0) {
1118 printk(KERN_INFO "Local APIC disabled by BIOS -- "
1119 "you can enable it with \"lapic\"\n");
1120 return -1;
1121 }
1122 /*
1123 * Some BIOSes disable the local APIC in the APIC_BASE
1124 * MSR. This can only be done in software for Intel P6 or later
1125 * and AMD K7 (Model > 1) or later.
1126 */
1127 rdmsr(MSR_IA32_APICBASE, l, h);
1128 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
1129 printk(KERN_INFO
1130 "Local APIC disabled by BIOS -- reenabling.\n");
1131 l &= ~MSR_IA32_APICBASE_BASE;
1132 l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
1133 wrmsr(MSR_IA32_APICBASE, l, h);
1134 enabled_via_apicbase = 1;
1135 }
1136 }
1137 /*
1138 * The APIC feature bit should now be enabled
1139 * in `cpuid'
1140 */
1141 features = cpuid_edx(1);
1142 if (!(features & (1 << X86_FEATURE_APIC))) {
1143 printk(KERN_WARNING "Could not enable APIC!\n");
1144 return -1;
1145 }
1146 set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1147 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1148
1149 /* The BIOS may have set up the APIC at some other address */
1150 rdmsr(MSR_IA32_APICBASE, l, h);
1151 if (l & MSR_IA32_APICBASE_ENABLE)
1152 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1153
1154 if (nmi_watchdog != NMI_NONE && nmi_watchdog != NMI_DISABLED)
1155 nmi_watchdog = NMI_LOCAL_APIC;
1156
1157 printk(KERN_INFO "Found and enabled local APIC!\n");
1158
1159 apic_pm_activate();
1160
1161 return 0;
1162
1163 no_apic:
1164 printk(KERN_INFO "No local APIC present or hardware disabled\n");
1165 return -1;
1166 }
1167
1168 /**
1169 * init_apic_mappings - initialize APIC mappings
1170 */
1171 void __init init_apic_mappings(void)
1172 {
1173 /*
1174 * If no local APIC can be found then set up a fake all
1175 * zeroes page to simulate the local APIC and another
1176 * one for the IO-APIC.
1177 */
1178 if (!smp_found_config && detect_init_APIC()) {
1179 apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
1180 apic_phys = __pa(apic_phys);
1181 } else
1182 apic_phys = mp_lapic_addr;
1183
1184 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
1185 printk(KERN_DEBUG "mapped APIC to %08lx (%08lx)\n", APIC_BASE,
1186 apic_phys);
1187
1188 /*
1189 * Fetch the APIC ID of the BSP in case we have a
1190 * default configuration (or the MP table is broken).
1191 */
1192 if (boot_cpu_physical_apicid == -1U)
1193 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
1194
1195 #ifdef CONFIG_X86_IO_APIC
1196 {
1197 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
1198 int i;
1199
1200 for (i = 0; i < nr_ioapics; i++) {
1201 if (smp_found_config) {
1202 ioapic_phys = mp_ioapics[i].mpc_apicaddr;
1203 if (!ioapic_phys) {
1204 printk(KERN_ERR
1205 "WARNING: bogus zero IO-APIC "
1206 "address found in MPTABLE, "
1207 "disabling IO/APIC support!\n");
1208 smp_found_config = 0;
1209 skip_ioapic_setup = 1;
1210 goto fake_ioapic_page;
1211 }
1212 } else {
1213 fake_ioapic_page:
1214 ioapic_phys = (unsigned long)
1215 alloc_bootmem_pages(PAGE_SIZE);
1216 ioapic_phys = __pa(ioapic_phys);
1217 }
1218 set_fixmap_nocache(idx, ioapic_phys);
1219 printk(KERN_DEBUG "mapped IOAPIC to %08lx (%08lx)\n",
1220 __fix_to_virt(idx), ioapic_phys);
1221 idx++;
1222 }
1223 }
1224 #endif
1225 }
1226
1227 /*
1228 * This initializes the IO-APIC and APIC hardware if this is
1229 * a UP kernel.
1230 */
1231
1232 int apic_version[MAX_APICS];
1233
1234 int __init APIC_init_uniprocessor(void)
1235 {
1236 if (enable_local_apic < 0)
1237 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1238
1239 if (!smp_found_config && !cpu_has_apic)
1240 return -1;
1241
1242 /*
1243 * Complain if the BIOS pretends there is one.
1244 */
1245 if (!cpu_has_apic &&
1246 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1247 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1248 boot_cpu_physical_apicid);
1249 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1250 return -1;
1251 }
1252
1253 verify_local_APIC();
1254
1255 connect_bsp_APIC();
1256
1257 /*
1258 * Hack: In case of kdump, after a crash, the kernel might be booting
1259 * on a CPU with a non-zero LAPIC id. But boot_cpu_physical_apicid
1260 * might be zero if read from MP tables. Get it from LAPIC.
1261 */
1262 #ifdef CONFIG_CRASH_DUMP
1263 boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
1264 #endif
1265 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
1266
1267 setup_local_APIC();
1268
1269 end_local_APIC_setup();
1270 #ifdef CONFIG_X86_IO_APIC
1271 if (smp_found_config)
1272 if (!skip_ioapic_setup && nr_ioapics)
1273 setup_IO_APIC();
1274 #endif
1275 setup_boot_clock();
1276
1277 return 0;
1278 }
1279
1280 /*
1281 * Local APIC interrupts
1282 */
1283
1284 /*
1285 * This interrupt should _never_ happen with our APIC/SMP architecture
1286 */
1287 void smp_spurious_interrupt(struct pt_regs *regs)
1288 {
1289 unsigned long v;
1290
1291 irq_enter();
1292 /*
1293 * Check if this really is a spurious interrupt and ACK it
1294 * if it is a vectored one. Just in case...
1295 * Spurious interrupts should not be ACKed.
1296 */
1297 v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
1298 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1299 ack_APIC_irq();
1300
1301 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
1302 printk(KERN_INFO "spurious APIC interrupt on CPU#%d, "
1303 "should never happen.\n", smp_processor_id());
1304 __get_cpu_var(irq_stat).irq_spurious_count++;
1305 irq_exit();
1306 }
1307
1308 /*
1309 * This interrupt should never happen with our APIC/SMP architecture
1310 */
1311 void smp_error_interrupt(struct pt_regs *regs)
1312 {
1313 unsigned long v, v1;
1314
1315 irq_enter();
1316 /* First tickle the hardware, only then report what went on. -- REW */
1317 v = apic_read(APIC_ESR);
1318 apic_write(APIC_ESR, 0);
1319 v1 = apic_read(APIC_ESR);
1320 ack_APIC_irq();
1321 atomic_inc(&irq_err_count);
1322
1323 /* Here is what the APIC error bits mean:
1324 0: Send CS error
1325 1: Receive CS error
1326 2: Send accept error
1327 3: Receive accept error
1328 4: Reserved
1329 5: Send illegal vector
1330 6: Received illegal vector
1331 7: Illegal register address
1332 */
1333 printk(KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
1334 smp_processor_id(), v, v1);
1335 irq_exit();
1336 }
1337
1338 #ifdef CONFIG_SMP
1339 void __init smp_intr_init(void)
1340 {
1341 /*
1342 * IRQ0 must be given a fixed assignment and initialized,
1343 * because it's used before the IO-APIC is set up.
1344 */
1345 set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
1346
1347 /*
1348 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
1349 * IPI, driven by wakeup.
1350 */
1351 set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
1352
1353 /* IPI for invalidation */
1354 set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
1355
1356 /* IPI for generic function call */
1357 set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
1358 }
1359 #endif
1360
1361 /*
1362 * Initialize APIC interrupts
1363 */
1364 void __init apic_intr_init(void)
1365 {
1366 #ifdef CONFIG_SMP
1367 smp_intr_init();
1368 #endif
1369 /* self generated IPI for local APIC timer */
1370 set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
1371
1372 /* IPI vectors for APIC spurious and error interrupts */
1373 set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
1374 set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
1375
1376 /* thermal monitor LVT interrupt */
1377 #ifdef CONFIG_X86_MCE_P4THERMAL
1378 set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
1379 #endif
1380 }
1381
1382 /**
1383 * connect_bsp_APIC - attach the APIC to the interrupt system
1384 */
1385 void __init connect_bsp_APIC(void)
1386 {
1387 if (pic_mode) {
1388 /*
1389 * Do not trust the local APIC being empty at bootup.
1390 */
1391 clear_local_APIC();
1392 /*
1393 * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
1394 * local APIC to INT and NMI lines.
1395 */
1396 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
1397 "enabling APIC mode.\n");
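/*
 * IMCR access (per the Intel MP spec): write selector 0x70 to port
 * 0x22, then 0x01 to port 0x23 to route INTR/NMI through the APIC.
 */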
1398 outb(0x70, 0x22);
1399 outb(0x01, 0x23);
1400 }
1401 enable_apic_mode();
1402 }
1403
1404 /**
1405 * disconnect_bsp_APIC - detach the APIC from the interrupt system
1406 * @virt_wire_setup: indicates whether virtual wire mode is selected
1407 *
1408 * Virtual wire mode is necessary to deliver legacy interrupts even when the
1409 * APIC is disabled.
1410 */
1411 void disconnect_bsp_APIC(int virt_wire_setup)
1412 {
1413 if (pic_mode) {
1414 /*
1415 * Put the board back into PIC mode (has an effect only on
1416 * certain older boards). Note that APIC interrupts, including
1417 * IPIs, won't work beyond this point! The only exception are
1418 * INIT IPIs.
1419 */
1420 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
1421 "entering PIC mode.\n");
1422 outb(0x70, 0x22);
1423 outb(0x00, 0x23);
1424 } else {
1425 /* Go back to Virtual Wire compatibility mode */
1426 unsigned long value;
1427
1428 /* For the spurious interrupt use vector F, and enable it */
1429 value = apic_read(APIC_SPIV);
1430 value &= ~APIC_VECTOR_MASK;
1431 value |= APIC_SPIV_APIC_ENABLED;
1432 value |= 0xf;
1433 apic_write_around(APIC_SPIV, value);
1434
1435 if (!virt_wire_setup) {
1436 /*
1437 * For LVT0 make it edge triggered, active high,
1438 * external and enabled
1439 */
1440 value = apic_read(APIC_LVT0);
1441 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
1442 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1443 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1444 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1445 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
1446 apic_write_around(APIC_LVT0, value);
1447 } else {
1448 /* Disable LVT0 */
1449 apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
1450 }
1451
1452 /*
1453 * For LVT1 make it edge triggered, active high, nmi and
1454 * enabled
1455 */
1456 value = apic_read(APIC_LVT1);
1457 value &= ~(
1458 APIC_MODE_MASK | APIC_SEND_PENDING |
1459 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
1460 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
1461 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
1462 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
1463 apic_write_around(APIC_LVT1, value);
1464 }
1465 }
1466
1467 unsigned int __cpuinitdata maxcpus = NR_CPUS;
1468
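/*
 * Record a processor reported by the BIOS (MP table / ACPI): validate its
 * APIC version, pick a logical CPU number, store the APIC id in the
 * (early) per-cpu maps and mark the CPU as possible and present.
 */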
1469 void __cpuinit generic_processor_info(int apicid, int version)
1470 {
1471 int cpu;
1472 cpumask_t tmp_map;
1473 physid_mask_t phys_cpu;
1474
1475 /*
1476 * Validate version
1477 */
1478 if (version == 0x0) {
1479 printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
1480 "fixing up to 0x10. (tell your hw vendor)\n",
1481 apicid);
1482 version = 0x10;
1483 }
1484 apic_version[apicid] = version;
1485
1486 phys_cpu = apicid_to_cpu_present(apicid);
1487 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
1488
1489 if (num_processors >= NR_CPUS) {
1490 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
1491 " Processor ignored.\n", NR_CPUS);
1492 return;
1493 }
1494
1495 if (num_processors >= maxcpus) {
1496 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
1497 " Processor ignored.\n", maxcpus);
1498 return;
1499 }
1500
1501 num_processors++;
1502 cpus_complement(tmp_map, cpu_present_map);
1503 cpu = first_cpu(tmp_map);
1504
1505 if (apicid == boot_cpu_physical_apicid)
1506 /*
1507 * x86_bios_cpu_apicid is required to have processors listed
1508 * in the same order as logical cpu numbers. Hence the first
1509 * entry is BSP, and so on.
1510 */
1511 cpu = 0;
1512
1513 /*
1514 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
1515 * but we need to work out other dependencies like SMP_SUSPEND etc
1516 * before this can be done without some confusion.
1517 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
1518 * - Ashok Raj <ashok.raj@intel.com>
1519 */
1520 if (num_processors > 8) {
1521 switch (boot_cpu_data.x86_vendor) {
1522 case X86_VENDOR_INTEL:
1523 if (!APIC_XAPIC(version)) {
1524 def_to_bigsmp = 0;
1525 break;
1526 }
1527 /* If P4 and above fall through */
1528 case X86_VENDOR_AMD:
1529 def_to_bigsmp = 1;
1530 }
1531 }
1532 #ifdef CONFIG_SMP
1533 /* are we being called early in kernel startup? */
1534 if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
1535 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
1536 u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
1537
1538 cpu_to_apicid[cpu] = apicid;
1539 bios_cpu_apicid[cpu] = apicid;
1540 } else {
1541 per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1542 per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1543 }
1544 #endif
1545 cpu_set(cpu, cpu_possible_map);
1546 cpu_set(cpu, cpu_present_map);
1547 }
1548
1549 /*
1550 * Power management
1551 */
1552 #ifdef CONFIG_PM
1553
1554 static struct {
1555 int active;
1556 /* r/w apic fields */
1557 unsigned int apic_id;
1558 unsigned int apic_taskpri;
1559 unsigned int apic_ldr;
1560 unsigned int apic_dfr;
1561 unsigned int apic_spiv;
1562 unsigned int apic_lvtt;
1563 unsigned int apic_lvtpc;
1564 unsigned int apic_lvt0;
1565 unsigned int apic_lvt1;
1566 unsigned int apic_lvterr;
1567 unsigned int apic_tmict;
1568 unsigned int apic_tdcr;
1569 unsigned int apic_thmr;
1570 } apic_pm_state;
1571
1572 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
1573 {
1574 unsigned long flags;
1575 int maxlvt;
1576
1577 if (!apic_pm_state.active)
1578 return 0;
1579
1580 maxlvt = lapic_get_maxlvt();
1581
1582 apic_pm_state.apic_id = apic_read(APIC_ID);
1583 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
1584 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
1585 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
1586 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
1587 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
1588 if (maxlvt >= 4)
1589 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
1590 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
1591 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
1592 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
1593 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
1594 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
1595 #ifdef CONFIG_X86_MCE_P4THERMAL
1596 if (maxlvt >= 5)
1597 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
1598 #endif
1599
1600 local_irq_save(flags);
1601 disable_local_APIC();
1602 local_irq_restore(flags);
1603 return 0;
1604 }
1605
1606 static int lapic_resume(struct sys_device *dev)
1607 {
1608 unsigned int l, h;
1609 unsigned long flags;
1610 int maxlvt;
1611
1612 if (!apic_pm_state.active)
1613 return 0;
1614
1615 maxlvt = lapic_get_maxlvt();
1616
1617 local_irq_save(flags);
1618
1619 /*
1620 * Make sure the APICBASE points to the right address
1621 *
1622 * FIXME! This will be wrong if we ever support suspend on
1623 * SMP! We'll need to do this as part of the CPU restore!
1624 */
1625 rdmsr(MSR_IA32_APICBASE, l, h);
1626 l &= ~MSR_IA32_APICBASE_BASE;
1627 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
1628 wrmsr(MSR_IA32_APICBASE, l, h);
1629
1630 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
1631 apic_write(APIC_ID, apic_pm_state.apic_id);
1632 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
1633 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
1634 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
1635 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
1636 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
1637 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
1638 #ifdef CONFIG_X86_MCE_P4THERMAL
1639 if (maxlvt >= 5)
1640 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
1641 #endif
1642 if (maxlvt >= 4)
1643 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
1644 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
1645 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
1646 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
1647 apic_write(APIC_ESR, 0);
1648 apic_read(APIC_ESR);
1649 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
1650 apic_write(APIC_ESR, 0);
1651 apic_read(APIC_ESR);
1652 local_irq_restore(flags);
1653 return 0;
1654 }
1655
1656 /*
1657 * This device has no shutdown method - fully functioning local APICs
1658 * are needed on every CPU up until machine_halt/restart/poweroff.
1659 */
1660
1661 static struct sysdev_class lapic_sysclass = {
1662 .name = "lapic",
1663 .resume = lapic_resume,
1664 .suspend = lapic_suspend,
1665 };
1666
1667 static struct sys_device device_lapic = {
1668 .id = 0,
1669 .cls = &lapic_sysclass,
1670 };
1671
1672 static void __devinit apic_pm_activate(void)
1673 {
1674 apic_pm_state.active = 1;
1675 }
1676
1677 static int __init init_lapic_sysfs(void)
1678 {
1679 int error;
1680
1681 if (!cpu_has_apic)
1682 return 0;
1683 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
1684
1685 error = sysdev_class_register(&lapic_sysclass);
1686 if (!error)
1687 error = sysdev_register(&device_lapic);
1688 return error;
1689 }
1690 device_initcall(init_lapic_sysfs);
1691
1692 #else /* CONFIG_PM */
1693
1694 static void apic_pm_activate(void) { }
1695
1696 #endif /* CONFIG_PM */
1697
1698 /*
1699 * APIC command line parameters
1700 */
1701 static int __init parse_lapic(char *arg)
1702 {
1703 enable_local_apic = 1;
1704 return 0;
1705 }
1706 early_param("lapic", parse_lapic);
1707
1708 static int __init parse_nolapic(char *arg)
1709 {
1710 enable_local_apic = -1;
1711 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1712 return 0;
1713 }
1714 early_param("nolapic", parse_nolapic);
1715
1716 static int __init parse_disable_lapic_timer(char *arg)
1717 {
1718 local_apic_timer_disabled = 1;
1719 return 0;
1720 }
1721 early_param("nolapic_timer", parse_disable_lapic_timer);
1722
1723 static int __init parse_lapic_timer_c2_ok(char *arg)
1724 {
1725 local_apic_timer_c2_ok = 1;
1726 return 0;
1727 }
1728 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1729
1730 static int __init apic_set_verbosity(char *str)
1731 {
1732 if (strcmp("debug", str) == 0)
1733 apic_verbosity = APIC_DEBUG;
1734 else if (strcmp("verbose", str) == 0)
1735 apic_verbosity = APIC_VERBOSE;
1736 return 1;
1737 }
1738 __setup("apic=", apic_set_verbosity);
1739