arch/i386/kernel/apic.c
1 /*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17 #include <linux/config.h>
18 #include <linux/init.h>
19
20 #include <linux/mm.h>
21 #include <linux/delay.h>
22 #include <linux/bootmem.h>
23 #include <linux/smp_lock.h>
24 #include <linux/interrupt.h>
25 #include <linux/mc146818rtc.h>
26 #include <linux/kernel_stat.h>
27 #include <linux/sysdev.h>
28 #include <linux/cpu.h>
29 #include <linux/module.h>
30
31 #include <asm/atomic.h>
32 #include <asm/smp.h>
33 #include <asm/mtrr.h>
34 #include <asm/mpspec.h>
35 #include <asm/desc.h>
36 #include <asm/arch_hooks.h>
37 #include <asm/hpet.h>
38 #include <asm/i8253.h>
39 #include <asm/nmi.h>
40
41 #include <mach_apic.h>
42 #include <mach_apicdef.h>
43 #include <mach_ipi.h>
44
45 #include "io_ports.h"
46
47 /*
48 * cpu_mask that denotes the CPUs that need the timer interrupt delivered as
49 * IPIs in place of local APIC timers
50 */
51 static cpumask_t timer_bcast_ipi;
52
53 /*
54 * Knob to control our willingness to enable the local APIC.
55 */
56 int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
57
58 /*
59 * Debug level
60 */
61 int apic_verbosity;
62
63
64 static void apic_pm_activate(void);
65
66 static int modern_apic(void)
67 {
68 unsigned int lvr, version;
69 /* AMD systems use old APIC versions, so check the CPU */
70 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
71 boot_cpu_data.x86 >= 0xf)
72 return 1;
73 lvr = apic_read(APIC_LVR);
74 version = GET_APIC_VERSION(lvr);
75 return version >= 0x14;
76 }
77
78 /*
79 * 'what should we do if we get a hw irq event on an illegal vector'.
80 * Each architecture has to answer this itself.
81 */
82 void ack_bad_irq(unsigned int irq)
83 {
84 printk("unexpected IRQ trap at vector %02x\n", irq);
85 /*
86 * Currently unexpected vectors happen only on SMP and APIC.
87 * We _must_ ack these because every local APIC has only N
88 * irq slots per priority level, and a 'hanging, unacked' IRQ
89 * holds up an irq slot - in excessive cases (when multiple
90 * unexpected vectors occur) that might lock up the APIC
91 * completely.
92 * But only ack when the APIC is enabled -AK
93 */
94 if (cpu_has_apic)
95 ack_APIC_irq();
96 }
97
98 void __init apic_intr_init(void)
99 {
100 #ifdef CONFIG_SMP
101 smp_intr_init();
102 #endif
103 /* self generated IPI for local APIC timer */
104 set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
105
106 /* IPI vectors for APIC spurious and error interrupts */
107 set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
108 set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
109
110 /* thermal monitor LVT interrupt */
111 #ifdef CONFIG_X86_MCE_P4THERMAL
112 set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
113 #endif
114 }
115
116 /* Using APIC to generate smp_local_timer_interrupt? */
117 int using_apic_timer __read_mostly = 0;
118
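/*
 * Set when we had to enable the local APIC ourselves through the
 * APIC_BASE MSR in detect_init_APIC(); disable_local_APIC() then also
 * clears the MSR enable bit again.
 */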
119 static int enabled_via_apicbase;
120
121 void enable_NMI_through_LVT0 (void * dummy)
122 {
123 unsigned int v, ver;
124
125 ver = apic_read(APIC_LVR);
126 ver = GET_APIC_VERSION(ver);
127 v = APIC_DM_NMI; /* unmask and set to NMI */
128 if (!APIC_INTEGRATED(ver)) /* 82489DX */
129 v |= APIC_LVT_LEVEL_TRIGGER;
130 apic_write_around(APIC_LVT0, v);
131 }
132
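/*
 * Broadcast destination ID for physical-mode IPIs: xAPICs (version >= 0x14;
 * modern_apic() also assumes this for AMD family 0xf and later) use 0xff,
 * older local APICs use 0xf.
 */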
133 int get_physical_broadcast(void)
134 {
135 if (modern_apic())
136 return 0xff;
137 else
138 return 0xf;
139 }
140
141 int get_maxlvt(void)
142 {
143 unsigned int v, ver, maxlvt;
144
145 v = apic_read(APIC_LVR);
146 ver = GET_APIC_VERSION(v);
147 /* 82489DXs do not report # of LVT entries. */
148 maxlvt = APIC_INTEGRATED(ver) ? GET_APIC_MAXLVT(v) : 2;
149 return maxlvt;
150 }
151
152 void clear_local_APIC(void)
153 {
154 int maxlvt;
155 unsigned long v;
156
157 maxlvt = get_maxlvt();
158
159 /*
160 * Masking an LVT entry on a P6 can trigger a local APIC error
161 * if the vector is zero. Mask LVTERR first to prevent this.
162 */
163 if (maxlvt >= 3) {
164 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
165 apic_write_around(APIC_LVTERR, v | APIC_LVT_MASKED);
166 }
167 /*
168 * Careful: we have to set masks only first to deassert
169 * any level-triggered sources.
170 */
171 v = apic_read(APIC_LVTT);
172 apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
173 v = apic_read(APIC_LVT0);
174 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
175 v = apic_read(APIC_LVT1);
176 apic_write_around(APIC_LVT1, v | APIC_LVT_MASKED);
177 if (maxlvt >= 4) {
178 v = apic_read(APIC_LVTPC);
179 apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
180 }
181
182 /* let's not touch this if we didn't frob it */
183 #ifdef CONFIG_X86_MCE_P4THERMAL
184 if (maxlvt >= 5) {
185 v = apic_read(APIC_LVTTHMR);
186 apic_write_around(APIC_LVTTHMR, v | APIC_LVT_MASKED);
187 }
188 #endif
189 /*
190 * Clean APIC state for other OSs:
191 */
192 apic_write_around(APIC_LVTT, APIC_LVT_MASKED);
193 apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
194 apic_write_around(APIC_LVT1, APIC_LVT_MASKED);
195 if (maxlvt >= 3)
196 apic_write_around(APIC_LVTERR, APIC_LVT_MASKED);
197 if (maxlvt >= 4)
198 apic_write_around(APIC_LVTPC, APIC_LVT_MASKED);
199
200 #ifdef CONFIG_X86_MCE_P4THERMAL
201 if (maxlvt >= 5)
202 apic_write_around(APIC_LVTTHMR, APIC_LVT_MASKED);
203 #endif
204 v = GET_APIC_VERSION(apic_read(APIC_LVR));
205 if (APIC_INTEGRATED(v)) { /* !82489DX */
206 if (maxlvt > 3) /* Due to Pentium errata 3AP and 11AP. */
207 apic_write(APIC_ESR, 0);
208 apic_read(APIC_ESR);
209 }
210 }
211
212 void __init connect_bsp_APIC(void)
213 {
214 if (pic_mode) {
215 /*
216 * Do not trust the local APIC being empty at bootup.
217 */
218 clear_local_APIC();
219 /*
220 * PIC mode, enable APIC mode in the IMCR, i.e.
221 * connect BSP's local APIC to INT and NMI lines.
222 */
223 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
224 "enabling APIC mode.\n");
225 outb(0x70, 0x22);
226 outb(0x01, 0x23);
227 }
228 enable_apic_mode();
229 }
230
231 void disconnect_bsp_APIC(int virt_wire_setup)
232 {
233 if (pic_mode) {
234 /*
235 * Put the board back into PIC mode (has an effect
236 * only on certain older boards). Note that APIC
237 * interrupts, including IPIs, won't work beyond
238 * this point! The only exception are INIT IPIs.
239 */
240 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
241 "entering PIC mode.\n");
242 outb(0x70, 0x22);
243 outb(0x00, 0x23);
244 }
245 else {
246 /* Go back to Virtual Wire compatibility mode */
247 unsigned long value;
248
249 /* For the spurious interrupt use vector F, and enable it */
250 value = apic_read(APIC_SPIV);
251 value &= ~APIC_VECTOR_MASK;
252 value |= APIC_SPIV_APIC_ENABLED;
253 value |= 0xf;
254 apic_write_around(APIC_SPIV, value);
255
256 if (!virt_wire_setup) {
257 /* For LVT0 make it edge triggered, active high, external and enabled */
258 value = apic_read(APIC_LVT0);
259 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
260 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
261 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
262 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
263 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
264 apic_write_around(APIC_LVT0, value);
265 }
266 else {
267 /* Disable LVT0 */
268 apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
269 }
270
271 /* For LVT1 make it edge triggered, active high, nmi and enabled */
272 value = apic_read(APIC_LVT1);
273 value &= ~(
274 APIC_MODE_MASK | APIC_SEND_PENDING |
275 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
276 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
277 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
278 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
279 apic_write_around(APIC_LVT1, value);
280 }
281 }
282
283 void disable_local_APIC(void)
284 {
285 unsigned long value;
286
287 clear_local_APIC();
288
289 /*
290 * Disable APIC (implies clearing of registers
291 * for 82489DX!).
292 */
293 value = apic_read(APIC_SPIV);
294 value &= ~APIC_SPIV_APIC_ENABLED;
295 apic_write_around(APIC_SPIV, value);
296
297 if (enabled_via_apicbase) {
298 unsigned int l, h;
299 rdmsr(MSR_IA32_APICBASE, l, h);
300 l &= ~MSR_IA32_APICBASE_ENABLE;
301 wrmsr(MSR_IA32_APICBASE, l, h);
302 }
303 }
304
305 /*
306 * This is to verify that we're looking at a real local APIC.
307 * Check these against your board if the CPUs aren't getting
308 * started for no apparent reason.
309 */
310 int __init verify_local_APIC(void)
311 {
312 unsigned int reg0, reg1;
313
314 /*
315 * The version register is read-only in a real APIC.
316 */
317 reg0 = apic_read(APIC_LVR);
318 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
319 apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
320 reg1 = apic_read(APIC_LVR);
321 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
322
323 /*
324 * The two version reads above should print the same
325 * numbers. If the second one is different, then we are
326 * poking at a non-APIC.
327 */
328 if (reg1 != reg0)
329 return 0;
330
331 /*
332 * Check if the version looks reasonable.
333 */
334 reg1 = GET_APIC_VERSION(reg0);
335 if (reg1 == 0x00 || reg1 == 0xff)
336 return 0;
337 reg1 = get_maxlvt();
338 if (reg1 < 0x02 || reg1 == 0xff)
339 return 0;
340
341 /*
342 * The ID register is read/write in a real APIC.
343 */
344 reg0 = apic_read(APIC_ID);
345 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
346
347 /*
348 * The next two are just to see if we have sane values.
349 * They're only really relevant if we're in Virtual Wire
350 * compatibility mode, but most boxes are these days.
351 */
352 reg0 = apic_read(APIC_LVT0);
353 apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
354 reg1 = apic_read(APIC_LVT1);
355 apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
356
357 return 1;
358 }
359
360 void __init sync_Arb_IDs(void)
361 {
362 /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1
363 And not needed on AMD */
364 if (modern_apic())
365 return;
366 /*
367 * Wait for idle.
368 */
369 apic_wait_icr_idle();
370
371 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
372 apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
373 | APIC_DM_INIT);
374 }
375
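/*
 * Deliberately never defined: if SPURIOUS_APIC_VECTOR does not end in 0xf,
 * the call in setup_local_APIC() survives constant folding and the build
 * fails with an unresolved reference at link time.
 */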
376 extern void __error_in_apic_c (void);
377
378 /*
379 * An initial setup of the virtual wire mode.
380 */
381 void __init init_bsp_APIC(void)
382 {
383 unsigned long value, ver;
384
385 /*
386 * Don't do the setup now if we have a SMP BIOS as the
387 * through-I/O-APIC virtual wire mode might be active.
388 */
389 if (smp_found_config || !cpu_has_apic)
390 return;
391
392 value = apic_read(APIC_LVR);
393 ver = GET_APIC_VERSION(value);
394
395 /*
396 * Do not trust the local APIC being empty at bootup.
397 */
398 clear_local_APIC();
399
400 /*
401 * Enable APIC.
402 */
403 value = apic_read(APIC_SPIV);
404 value &= ~APIC_VECTOR_MASK;
405 value |= APIC_SPIV_APIC_ENABLED;
406
407 /* This bit is reserved on P4/Xeon and should be cleared */
408 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15))
409 value &= ~APIC_SPIV_FOCUS_DISABLED;
410 else
411 value |= APIC_SPIV_FOCUS_DISABLED;
412 value |= SPURIOUS_APIC_VECTOR;
413 apic_write_around(APIC_SPIV, value);
414
415 /*
416 * Set up the virtual wire mode.
417 */
418 apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
419 value = APIC_DM_NMI;
420 if (!APIC_INTEGRATED(ver)) /* 82489DX */
421 value |= APIC_LVT_LEVEL_TRIGGER;
422 apic_write_around(APIC_LVT1, value);
423 }
424
425 void __devinit setup_local_APIC(void)
426 {
427 unsigned long oldvalue, value, ver, maxlvt;
428 int i, j;
429
430 /* Pound the ESR really hard over the head with a big hammer - mbligh */
431 if (esr_disable) {
432 apic_write(APIC_ESR, 0);
433 apic_write(APIC_ESR, 0);
434 apic_write(APIC_ESR, 0);
435 apic_write(APIC_ESR, 0);
436 }
437
438 value = apic_read(APIC_LVR);
439 ver = GET_APIC_VERSION(value);
440
441 if ((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f)
442 __error_in_apic_c();
443
444 /*
445 * Double-check whether this APIC is really registered.
446 */
447 if (!apic_id_registered())
448 BUG();
449
450 /*
451 * Intel recommends setting DFR, LDR and TPR before enabling
452 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
453 * document number 292116). So here it goes...
454 */
455 init_apic_ldr();
456
457 /*
458 * Set Task Priority to 'accept all'. We never change this
459 * later on.
460 */
461 value = apic_read(APIC_TASKPRI);
462 value &= ~APIC_TPRI_MASK;
463 apic_write_around(APIC_TASKPRI, value);
464
465 /*
466 * After a crash, we no longer service the interrupts and a pending
467 * interrupt from previous kernel might still have ISR bit set.
468 *
469 * Most probably the CPU has serviced that pending interrupt by now, but
470 * it might not have done the ack_APIC_irq() because it thought the
471 * interrupt came from the i8259 as ExtInt. The LAPIC did not get an EOI,
472 * so it does not clear the ISR bit and the CPU thinks it has already
473 * serviced the interrupt. Hence a vector might get locked. It was noticed
474 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
475 */
476 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
477 value = apic_read(APIC_ISR + i*0x10);
478 for (j = 31; j >= 0; j--) {
479 if (value & (1<<j))
480 ack_APIC_irq();
481 }
482 }
483
484 /*
485 * Now that we are all set up, enable the APIC
486 */
487 value = apic_read(APIC_SPIV);
488 value &= ~APIC_VECTOR_MASK;
489 /*
490 * Enable APIC
491 */
492 value |= APIC_SPIV_APIC_ENABLED;
493
494 /*
495 * Some unknown Intel IO/APIC (or APIC) erratum is biting us with
496 * certain networking cards. If high frequency interrupts are
497 * happening on a particular IOAPIC pin, plus the IOAPIC routing
498 * entry is masked/unmasked at a high rate as well then sooner or
499 * later IOAPIC line gets 'stuck', no more interrupts are received
500 * from the device. If focus CPU is disabled then the hang goes
501 * away, oh well :-(
502 *
503 * [ This bug can be reproduced easily with a level-triggered
504 * PCI Ne2000 networking card and PII/PIII processors, dual
505 * BX chipset. ]
506 */
507 /*
508 * Actually disabling the focus CPU check just makes the hang less
509 * frequent as it makes the interrupt distribution model more
510 * like LRU than MRU (the short-term load is more even across CPUs).
511 * See also the comment in end_level_ioapic_irq(). --macro
512 */
513 #if 1
514 /* Enable focus processor (bit==0) */
515 value &= ~APIC_SPIV_FOCUS_DISABLED;
516 #else
517 /* Disable focus processor (bit==1) */
518 value |= APIC_SPIV_FOCUS_DISABLED;
519 #endif
520 /*
521 * Set spurious IRQ vector
522 */
523 value |= SPURIOUS_APIC_VECTOR;
524 apic_write_around(APIC_SPIV, value);
525
526 /*
527 * Set up LVT0, LVT1:
528 *
529 * set up through-local-APIC on the BP's LINT0. This is not
530 * strictly necessary in pure symmetric-IO mode, but sometimes
531 * we delegate interrupts to the 8259A.
532 */
533 /*
534 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
535 */
536 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
537 if (!smp_processor_id() && (pic_mode || !value)) {
538 value = APIC_DM_EXTINT;
539 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
540 smp_processor_id());
541 } else {
542 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
543 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
544 smp_processor_id());
545 }
546 apic_write_around(APIC_LVT0, value);
547
548 /*
549 * only the BP should see the LINT1 NMI signal, obviously.
550 */
551 if (!smp_processor_id())
552 value = APIC_DM_NMI;
553 else
554 value = APIC_DM_NMI | APIC_LVT_MASKED;
555 if (!APIC_INTEGRATED(ver)) /* 82489DX */
556 value |= APIC_LVT_LEVEL_TRIGGER;
557 apic_write_around(APIC_LVT1, value);
558
559 if (APIC_INTEGRATED(ver) && !esr_disable) { /* !82489DX */
560 maxlvt = get_maxlvt();
561 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
562 apic_write(APIC_ESR, 0);
563 oldvalue = apic_read(APIC_ESR);
564
565 value = ERROR_APIC_VECTOR; /* enables sending errors */
566 apic_write_around(APIC_LVTERR, value);
567 /*
568 * spec says clear errors after enabling vector.
569 */
570 if (maxlvt > 3)
571 apic_write(APIC_ESR, 0);
572 value = apic_read(APIC_ESR);
573 if (value != oldvalue)
574 apic_printk(APIC_VERBOSE, "ESR value before enabling "
575 "vector: 0x%08lx after: 0x%08lx\n",
576 oldvalue, value);
577 } else {
578 if (esr_disable)
579 /*
580 * Something untraceable is creating bad interrupts on
581 * secondary quads ... for the moment, just leave the
582 * ESR disabled - we can't do anything useful with the
583 * errors anyway - mbligh
584 */
585 printk("Leaving ESR disabled.\n");
586 else
587 printk("No ESR for 82489DX.\n");
588 }
589
590 if (nmi_watchdog == NMI_LOCAL_APIC)
591 setup_apic_nmi_watchdog();
592 apic_pm_activate();
593 }
594
595 /*
596 * If Linux enabled the LAPIC against the BIOS default,
597 * shut it down before re-entering the BIOS on shutdown.
598 * Otherwise the BIOS may get confused and not power off.
599 * Additionally, clear all LVT entries before disable_local_APIC
600 * for the case where Linux didn't enable the LAPIC.
601 */
602 void lapic_shutdown(void)
603 {
604 unsigned long flags;
605
606 if (!cpu_has_apic)
607 return;
608
609 local_irq_save(flags);
610 clear_local_APIC();
611
612 if (enabled_via_apicbase)
613 disable_local_APIC();
614
615 local_irq_restore(flags);
616 }
617
618 #ifdef CONFIG_PM
619
620 static struct {
621 int active;
622 /* r/w apic fields */
623 unsigned int apic_id;
624 unsigned int apic_taskpri;
625 unsigned int apic_ldr;
626 unsigned int apic_dfr;
627 unsigned int apic_spiv;
628 unsigned int apic_lvtt;
629 unsigned int apic_lvtpc;
630 unsigned int apic_lvt0;
631 unsigned int apic_lvt1;
632 unsigned int apic_lvterr;
633 unsigned int apic_tmict;
634 unsigned int apic_tdcr;
635 unsigned int apic_thmr;
636 } apic_pm_state;
637
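/*
 * Save the read/write local APIC registers across a suspend; lapic_resume()
 * re-enables the APIC through MSR_IA32_APICBASE and writes them back in a
 * safe order (LVTERR masked first).
 */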
638 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
639 {
640 unsigned long flags;
641
642 if (!apic_pm_state.active)
643 return 0;
644
645 apic_pm_state.apic_id = apic_read(APIC_ID);
646 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
647 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
648 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
649 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
650 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
651 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
652 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
653 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
654 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
655 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
656 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
657 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
658
659 local_irq_save(flags);
660 disable_local_APIC();
661 local_irq_restore(flags);
662 return 0;
663 }
664
665 static int lapic_resume(struct sys_device *dev)
666 {
667 unsigned int l, h;
668 unsigned long flags;
669
670 if (!apic_pm_state.active)
671 return 0;
672
673 local_irq_save(flags);
674
675 /*
676 * Make sure the APICBASE points to the right address
677 *
678 * FIXME! This will be wrong if we ever support suspend on
679 * SMP! We'll need to do this as part of the CPU restore!
680 */
681 rdmsr(MSR_IA32_APICBASE, l, h);
682 l &= ~MSR_IA32_APICBASE_BASE;
683 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
684 wrmsr(MSR_IA32_APICBASE, l, h);
685
686 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
687 apic_write(APIC_ID, apic_pm_state.apic_id);
688 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
689 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
690 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
691 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
692 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
693 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
694 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
695 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
696 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
697 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
698 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
699 apic_write(APIC_ESR, 0);
700 apic_read(APIC_ESR);
701 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
702 apic_write(APIC_ESR, 0);
703 apic_read(APIC_ESR);
704 local_irq_restore(flags);
705 return 0;
706 }
707
708 /*
709 * This device has no shutdown method - fully functioning local APICs
710 * are needed on every CPU up until machine_halt/restart/poweroff.
711 */
712
713 static struct sysdev_class lapic_sysclass = {
714 set_kset_name("lapic"),
715 .resume = lapic_resume,
716 .suspend = lapic_suspend,
717 };
718
719 static struct sys_device device_lapic = {
720 .id = 0,
721 .cls = &lapic_sysclass,
722 };
723
724 static void __devinit apic_pm_activate(void)
725 {
726 apic_pm_state.active = 1;
727 }
728
729 static int __init init_lapic_sysfs(void)
730 {
731 int error;
732
733 if (!cpu_has_apic)
734 return 0;
735 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
736
737 error = sysdev_class_register(&lapic_sysclass);
738 if (!error)
739 error = sysdev_register(&device_lapic);
740 return error;
741 }
742 device_initcall(init_lapic_sysfs);
743
744 #else /* CONFIG_PM */
745
746 static void apic_pm_activate(void) { }
747
748 #endif /* CONFIG_PM */
749
750 /*
751 * Detect and enable local APICs on non-SMP boards.
752 * Original code written by Keir Fraser.
753 */
754
755 static int __init apic_set_verbosity(char *str)
756 {
757 if (strcmp("debug", str) == 0)
758 apic_verbosity = APIC_DEBUG;
759 else if (strcmp("verbose", str) == 0)
760 apic_verbosity = APIC_VERBOSE;
761 return 1;
762 }
763
764 __setup("apic=", apic_set_verbosity);
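/*
 * Example: booting with "apic=verbose" or "apic=debug" on the kernel
 * command line raises apic_verbosity above the quiet default.
 */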
765
766 static int __init detect_init_APIC (void)
767 {
768 u32 h, l, features;
769
770 /* Disabled by kernel option? */
771 if (enable_local_apic < 0)
772 return -1;
773
774 switch (boot_cpu_data.x86_vendor) {
775 case X86_VENDOR_AMD:
776 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
777 (boot_cpu_data.x86 == 15))
778 break;
779 goto no_apic;
780 case X86_VENDOR_INTEL:
781 if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
782 (boot_cpu_data.x86 == 5 && cpu_has_apic))
783 break;
784 goto no_apic;
785 default:
786 goto no_apic;
787 }
788
789 if (!cpu_has_apic) {
790 /*
791 * Over-ride BIOS and try to enable the local
792 * APIC only if "lapic" specified.
793 */
794 if (enable_local_apic <= 0) {
795 printk("Local APIC disabled by BIOS -- "
796 "you can enable it with \"lapic\"\n");
797 return -1;
798 }
799 /*
800 * Some BIOSes disable the local APIC in the
801 * APIC_BASE MSR. This can only be done in
802 * software for Intel P6 or later and AMD K7
803 * (Model > 1) or later.
804 */
805 rdmsr(MSR_IA32_APICBASE, l, h);
806 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
807 printk("Local APIC disabled by BIOS -- reenabling.\n");
808 l &= ~MSR_IA32_APICBASE_BASE;
809 l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
810 wrmsr(MSR_IA32_APICBASE, l, h);
811 enabled_via_apicbase = 1;
812 }
813 }
814 /*
815 * The APIC feature bit should now be enabled
816 * in `cpuid'
817 */
818 features = cpuid_edx(1);
819 if (!(features & (1 << X86_FEATURE_APIC))) {
820 printk("Could not enable APIC!\n");
821 return -1;
822 }
823 set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
824 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
825
826 /* The BIOS may have set up the APIC at some other address */
827 rdmsr(MSR_IA32_APICBASE, l, h);
828 if (l & MSR_IA32_APICBASE_ENABLE)
829 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
830
831 if (nmi_watchdog != NMI_NONE)
832 nmi_watchdog = NMI_LOCAL_APIC;
833
834 printk("Found and enabled local APIC!\n");
835
836 apic_pm_activate();
837
838 return 0;
839
840 no_apic:
841 printk("No local APIC present or hardware disabled\n");
842 return -1;
843 }
844
845 void __init init_apic_mappings(void)
846 {
847 unsigned long apic_phys;
848
849 /*
850 * If no local APIC can be found then set up a fake all
851 * zeroes page to simulate the local APIC and another
852 * one for the IO-APIC.
853 */
854 if (!smp_found_config && detect_init_APIC()) {
855 apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
856 apic_phys = __pa(apic_phys);
857 } else
858 apic_phys = mp_lapic_addr;
859
860 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
861 printk(KERN_DEBUG "mapped APIC to %08lx (%08lx)\n", APIC_BASE,
862 apic_phys);
863
864 /*
865 * Fetch the APIC ID of the BSP in case we have a
866 * default configuration (or the MP table is broken).
867 */
868 if (boot_cpu_physical_apicid == -1U)
869 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
870
871 #ifdef CONFIG_X86_IO_APIC
872 {
873 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
874 int i;
875
876 for (i = 0; i < nr_ioapics; i++) {
877 if (smp_found_config) {
878 ioapic_phys = mp_ioapics[i].mpc_apicaddr;
879 if (!ioapic_phys) {
880 printk(KERN_ERR
881 "WARNING: bogus zero IO-APIC "
882 "address found in MPTABLE, "
883 "disabling IO/APIC support!\n");
884 smp_found_config = 0;
885 skip_ioapic_setup = 1;
886 goto fake_ioapic_page;
887 }
888 } else {
889 fake_ioapic_page:
890 ioapic_phys = (unsigned long)
891 alloc_bootmem_pages(PAGE_SIZE);
892 ioapic_phys = __pa(ioapic_phys);
893 }
894 set_fixmap_nocache(idx, ioapic_phys);
895 printk(KERN_DEBUG "mapped IOAPIC to %08lx (%08lx)\n",
896 __fix_to_virt(idx), ioapic_phys);
897 idx++;
898 }
899 }
900 #endif
901 }
902
903 /*
904 * This part sets up the APIC 32-bit clock in LVTT1, with HZ interrupts
905 * per second. We assume that the caller has already set up the local
906 * APIC.
907 *
908 * The APIC timer is not exactly in sync with the external timer chip; it
909 * closely follows bus clocks.
910 */
911
912 /*
913 * The timer chip is already set up at HZ interrupts per second here,
914 * but we do not accept timer interrupts yet. We only allow the BP
915 * to calibrate.
916 */
917 static unsigned int __devinit get_8254_timer_count(void)
918 {
919 unsigned long flags;
920
921 unsigned int count;
922
923 spin_lock_irqsave(&i8253_lock, flags);
924
925 outb_p(0x00, PIT_MODE);
926 count = inb_p(PIT_CH0);
927 count |= inb_p(PIT_CH0) << 8;
928
929 spin_unlock_irqrestore(&i8253_lock, flags);
930
931 return count;
932 }
933
934 /* next tick in 8254 can be caught by catching timer wraparound */
935 static void __devinit wait_8254_wraparound(void)
936 {
937 unsigned int curr_count, prev_count;
938
939 curr_count = get_8254_timer_count();
940 do {
941 prev_count = curr_count;
942 curr_count = get_8254_timer_count();
943
944 /* workaround for broken Mercury/Neptune */
945 if (prev_count >= curr_count + 0x100)
946 curr_count = get_8254_timer_count();
947
948 } while (prev_count >= curr_count);
949 }
950
951 /*
952 * Default initialization for 8254 timers. If we use other timers like HPET,
953 * we override this later
954 */
955 void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;
956
957 /*
958 * This function sets up the local APIC timer, with a timeout of
959 * 'clocks' APIC bus clock. During calibration we actually call
960 * this function twice on the boot CPU, once with a bogus timeout
961 * value, second time for real. The other (noncalibrating) CPUs
962 * call this function only once, with the real, calibrated value.
963 *
964 * We do reads before writes even if unnecessary, to get around the
965 * P5 APIC double write bug.
966 */
967
968 #define APIC_DIVISOR 16
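/*
 * __setup_APIC_LVTT() programs the timer with divide-by-16 (APIC_TDR_DIV_16),
 * so the 'clocks' arguments below are bus clocks and TMICT is loaded with
 * clocks/APIC_DIVISOR.
 */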
969
970 static void __setup_APIC_LVTT(unsigned int clocks)
971 {
972 unsigned int lvtt_value, tmp_value, ver;
973 int cpu = smp_processor_id();
974
975 ver = GET_APIC_VERSION(apic_read(APIC_LVR));
976 lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
977 if (!APIC_INTEGRATED(ver))
978 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
979
980 if (cpu_isset(cpu, timer_bcast_ipi))
981 lvtt_value |= APIC_LVT_MASKED;
982
983 apic_write_around(APIC_LVTT, lvtt_value);
984
985 /*
986 * Divide PICLK by 16
987 */
988 tmp_value = apic_read(APIC_TDCR);
989 apic_write_around(APIC_TDCR, (tmp_value
990 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
991 | APIC_TDR_DIV_16);
992
993 apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
994 }
995
996 static void __devinit setup_APIC_timer(unsigned int clocks)
997 {
998 unsigned long flags;
999
1000 local_irq_save(flags);
1001
1002 /*
1003 * Wait for IRQ0's slice:
1004 */
1005 wait_timer_tick();
1006
1007 __setup_APIC_LVTT(clocks);
1008
1009 local_irq_restore(flags);
1010 }
1011
1012 /*
1013 * In this function we calibrate APIC bus clocks to the external
1014 * timer. Unfortunately we cannot use jiffies and the timer irq
1015 * to calibrate, since some later bootup code depends on getting
1016 * the first irq? Ugh.
1017 *
1018 * We want to do the calibration only once since we
1019 * want to have local timer irqs in sync. CPUs connected
1020 * by the same APIC bus have the very same bus frequency.
1021 * And we want to have irqs off anyway, no accidental
1022 * APIC irq that way.
1023 */
1024
1025 static int __init calibrate_APIC_clock(void)
1026 {
1027 unsigned long long t1 = 0, t2 = 0;
1028 long tt1, tt2;
1029 long result;
1030 int i;
1031 const int LOOPS = HZ/10;
1032
1033 apic_printk(APIC_VERBOSE, "calibrating APIC timer ...\n");
1034
1035 /*
1036 * Put whatever arbitrary (but long enough) timeout
1037 * value into the APIC clock, we just want to get the
1038 * counter running for calibration.
1039 */
1040 __setup_APIC_LVTT(1000000000);
1041
1042 /*
1043 * The timer chip counts down to zero. Let's wait
1044 * for a wraparound to start exact measurement:
1045 * (the current tick might have been already half done)
1046 */
1047
1048 wait_timer_tick();
1049
1050 /*
1051 * We wrapped around just now. Let's start:
1052 */
1053 if (cpu_has_tsc)
1054 rdtscll(t1);
1055 tt1 = apic_read(APIC_TMCCT);
1056
1057 /*
1058 * Let's wait LOOPS wraparounds:
1059 */
1060 for (i = 0; i < LOOPS; i++)
1061 wait_timer_tick();
1062
1063 tt2 = apic_read(APIC_TMCCT);
1064 if (cpu_has_tsc)
1065 rdtscll(t2);
1066
1067 /*
1068 * The APIC bus clock counter is 32 bits only, it
1069 * might have overflowed, but note that we use signed
1070 * longs, thus no extra care needed.
1071 *
1072 * underflowed to be exact, as the timer counts down ;)
1073 */
1074
1075 result = (tt1-tt2)*APIC_DIVISOR/LOOPS;
1076
1077 if (cpu_has_tsc)
1078 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
1079 "%ld.%04ld MHz.\n",
1080 ((long)(t2-t1)/LOOPS)/(1000000/HZ),
1081 ((long)(t2-t1)/LOOPS)%(1000000/HZ));
1082
1083 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
1084 "%ld.%04ld MHz.\n",
1085 result/(1000000/HZ),
1086 result%(1000000/HZ));
1087
1088 return result;
1089 }
1090
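/*
 * Bus clocks per timer tick, measured once by calibrate_APIC_clock() on the
 * boot CPU and reused for the secondary CPUs in setup_secondary_APIC_clock().
 */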
1091 static unsigned int calibration_result;
1092
1093 void __init setup_boot_APIC_clock(void)
1094 {
1095 unsigned long flags;
1096 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n");
1097 using_apic_timer = 1;
1098
1099 local_irq_save(flags);
1100
1101 calibration_result = calibrate_APIC_clock();
1102 /*
1103 * Now set up the timer for real.
1104 */
1105 setup_APIC_timer(calibration_result);
1106
1107 local_irq_restore(flags);
1108 }
1109
1110 void __devinit setup_secondary_APIC_clock(void)
1111 {
1112 setup_APIC_timer(calibration_result);
1113 }
1114
1115 void disable_APIC_timer(void)
1116 {
1117 if (using_apic_timer) {
1118 unsigned long v;
1119
1120 v = apic_read(APIC_LVTT);
1121 apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
1122 }
1123 }
1124
1125 void enable_APIC_timer(void)
1126 {
1127 int cpu = smp_processor_id();
1128
1129 if (using_apic_timer &&
1130 !cpu_isset(cpu, timer_bcast_ipi)) {
1131 unsigned long v;
1132
1133 v = apic_read(APIC_LVTT);
1134 apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED);
1135 }
1136 }
1137
1138 void switch_APIC_timer_to_ipi(void *cpumask)
1139 {
1140 cpumask_t mask = *(cpumask_t *)cpumask;
1141 int cpu = smp_processor_id();
1142
1143 if (cpu_isset(cpu, mask) &&
1144 !cpu_isset(cpu, timer_bcast_ipi)) {
1145 disable_APIC_timer();
1146 cpu_set(cpu, timer_bcast_ipi);
1147 }
1148 }
1149 EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
1150
1151 void switch_ipi_to_APIC_timer(void *cpumask)
1152 {
1153 cpumask_t mask = *(cpumask_t *)cpumask;
1154 int cpu = smp_processor_id();
1155
1156 if (cpu_isset(cpu, mask) &&
1157 cpu_isset(cpu, timer_bcast_ipi)) {
1158 cpu_clear(cpu, timer_bcast_ipi);
1159 enable_APIC_timer();
1160 }
1161 }
1162 EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
1163
1164 #undef APIC_DIVISOR
1165
1166 /*
1167 * Local timer interrupt handler. It does both profiling and
1168 * process statistics/rescheduling.
1169 *
1170 * We do profiling in every local tick, statistics/rescheduling
1171 * happen only every 'profiling multiplier' ticks. The default
1172 * multiplier is 1 and it can be changed by writing the new multiplier
1173 * value into /proc/profile.
1174 */
1175
1176 inline void smp_local_timer_interrupt(struct pt_regs * regs)
1177 {
1178 profile_tick(CPU_PROFILING, regs);
1179 #ifdef CONFIG_SMP
1180 update_process_times(user_mode_vm(regs));
1181 #endif
1182
1183 /*
1184 * We take the 'long' return path, and there every subsystem
1185 * grabs the appropriate locks (kernel lock / irq lock).
1186 *
1187 * We might want to decouple profiling from the 'long path',
1188 * and do the profiling totally in assembly.
1189 *
1190 * Currently this isn't too much of an issue (performance wise),
1191 * we can take more than 100K local irqs per second on a 100 MHz P5.
1192 */
1193 }
1194
1195 /*
1196 * Local APIC timer interrupt. This is the most natural way for doing
1197 * local interrupts, but local timer interrupts can be emulated by
1198 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
1199 *
1200 * [ if a single-CPU system runs an SMP kernel then we call the local
1201 * interrupt as well. Thus we cannot inline the local irq ... ]
1202 */
1203
1204 fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
1205 {
1206 int cpu = smp_processor_id();
1207
1208 /*
1209 * the NMI deadlock-detector uses this.
1210 */
1211 per_cpu(irq_stat, cpu).apic_timer_irqs++;
1212
1213 /*
1214 * NOTE! We'd better ACK the irq immediately,
1215 * because timer handling can be slow.
1216 */
1217 ack_APIC_irq();
1218 /*
1219 * update_process_times() expects us to have done irq_enter().
1220 * Besides, if we don't, timer interrupts ignore the global
1221 * interrupt lock, which is the WrongThing (tm) to do.
1222 */
1223 irq_enter();
1224 smp_local_timer_interrupt(regs);
1225 irq_exit();
1226 }
1227
1228 #ifndef CONFIG_SMP
1229 static void up_apic_timer_interrupt_call(struct pt_regs *regs)
1230 {
1231 int cpu = smp_processor_id();
1232
1233 /*
1234 * the NMI deadlock-detector uses this.
1235 */
1236 per_cpu(irq_stat, cpu).apic_timer_irqs++;
1237
1238 smp_local_timer_interrupt(regs);
1239 }
1240 #endif
1241
1242 void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
1243 {
1244 cpumask_t mask;
1245
1246 cpus_and(mask, cpu_online_map, timer_bcast_ipi);
1247 if (!cpus_empty(mask)) {
1248 #ifdef CONFIG_SMP
1249 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
1250 #else
1251 /*
1252 * We can call the apic timer interrupt handler directly in the
1253 * UP case, minus all the irq-related work
1254 */
1255 up_apic_timer_interrupt_call(regs);
1256 #endif
1257 }
1258 }
1259
1260 int setup_profiling_timer(unsigned int multiplier)
1261 {
1262 return -EINVAL;
1263 }
1264
1265 /*
1266 * This interrupt should _never_ happen with our APIC/SMP architecture
1267 */
1268 fastcall void smp_spurious_interrupt(struct pt_regs *regs)
1269 {
1270 unsigned long v;
1271
1272 irq_enter();
1273 /*
1274 * Check if this really is a spurious interrupt and ACK it
1275 * if it is a vectored one. Just in case...
1276 * Spurious interrupts should not be ACKed.
1277 */
1278 v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
1279 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1280 ack_APIC_irq();
1281
1282 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
1283 printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n",
1284 smp_processor_id());
1285 irq_exit();
1286 }
1287
1288 /*
1289 * This interrupt should never happen with our APIC/SMP architecture
1290 */
1291
1292 fastcall void smp_error_interrupt(struct pt_regs *regs)
1293 {
1294 unsigned long v, v1;
1295
1296 irq_enter();
1297 /* First tickle the hardware, only then report what went on. -- REW */
1298 v = apic_read(APIC_ESR);
1299 apic_write(APIC_ESR, 0);
1300 v1 = apic_read(APIC_ESR);
1301 ack_APIC_irq();
1302 atomic_inc(&irq_err_count);
1303
1304 /* Here is what the APIC error bits mean:
1305 0: Send CS error
1306 1: Receive CS error
1307 2: Send accept error
1308 3: Receive accept error
1309 4: Reserved
1310 5: Send illegal vector
1311 6: Received illegal vector
1312 7: Illegal register address
1313 */
1314 printk (KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
1315 smp_processor_id(), v, v1);
1316 irq_exit();
1317 }
1318
1319 /*
1320 * This initializes the IO-APIC and APIC hardware if this is
1321 * a UP kernel.
1322 */
1323 int __init APIC_init_uniprocessor (void)
1324 {
1325 if (enable_local_apic < 0)
1326 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
1327
1328 if (!smp_found_config && !cpu_has_apic)
1329 return -1;
1330
1331 /*
1332 * Complain if the BIOS pretends there is one.
1333 */
1334 if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1335 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1336 boot_cpu_physical_apicid);
1337 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
1338 return -1;
1339 }
1340
1341 verify_local_APIC();
1342
1343 connect_bsp_APIC();
1344
1345 /*
1346 * Hack: In case of kdump, after a crash, kernel might be booting
1347 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
1348 * might be zero if read from MP tables. Get it from LAPIC.
1349 */
1350 #ifdef CONFIG_CRASH_DUMP
1351 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
1352 #endif
1353 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
1354
1355 setup_local_APIC();
1356
1357 #ifdef CONFIG_X86_IO_APIC
1358 if (smp_found_config)
1359 if (!skip_ioapic_setup && nr_ioapics)
1360 setup_IO_APIC();
1361 #endif
1362 setup_boot_APIC_clock();
1363
1364 return 0;
1365 }