[PATCH] make smp_prepare_cpu a weak function
arch/x86_64/kernel/apic.c
1/*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17#include <linux/config.h>
18#include <linux/init.h>
19
20#include <linux/mm.h>
21#include <linux/irq.h>
22#include <linux/delay.h>
23#include <linux/bootmem.h>
24#include <linux/smp_lock.h>
25#include <linux/interrupt.h>
26#include <linux/mc146818rtc.h>
27#include <linux/kernel_stat.h>
28#include <linux/sysdev.h>
29
30#include <asm/atomic.h>
31#include <asm/smp.h>
32#include <asm/mtrr.h>
33#include <asm/mpspec.h>
34#include <asm/pgalloc.h>
35#include <asm/mach_apic.h>
36#include <asm/nmi.h>
37
38int apic_verbosity;
39
40int disable_apic_timer __initdata;
41
42/* Using APIC to generate smp_local_timer_interrupt? */
43int using_apic_timer = 0;
44
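/*
 * Per-CPU profiling state: the multiplier requested via
 * setup_profiling_timer(), the value currently in effect, and a
 * countdown used by smp_local_timer_interrupt().
 */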
45static DEFINE_PER_CPU(int, prof_multiplier) = 1;
46static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
47static DEFINE_PER_CPU(int, prof_counter) = 1;
48
49static void apic_pm_activate(void);
50
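/*
 * Unmask LVT0 and set its delivery mode to NMI, so this CPU receives
 * NMIs on its LINT0 pin.
 */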
51void enable_NMI_through_LVT0 (void * dummy)
52{
53 unsigned int v, ver;
54
55 ver = apic_read(APIC_LVR);
56 ver = GET_APIC_VERSION(ver);
57 v = APIC_DM_NMI; /* unmask and set to NMI */
58 apic_write_around(APIC_LVT0, v);
59}
60
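/* Return the index of the highest LVT entry this local APIC implements. */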
61int get_maxlvt(void)
62{
63 unsigned int v, ver, maxlvt;
64
65 v = apic_read(APIC_LVR);
66 ver = GET_APIC_VERSION(v);
67 maxlvt = GET_APIC_MAXLVT(v);
68 return maxlvt;
69}
70
71void clear_local_APIC(void)
72{
73 int maxlvt;
74 unsigned int v;
75
76 maxlvt = get_maxlvt();
77
78 /*
79 * Masking an LVT entry on a P6 can trigger a local APIC error
80 * if the vector is zero. Mask LVTERR first to prevent this.
81 */
82 if (maxlvt >= 3) {
83 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
84 apic_write_around(APIC_LVTERR, v | APIC_LVT_MASKED);
85 }
86 /*
87 * Careful: we have to set masks only first to deassert
88 * any level-triggered sources.
89 */
90 v = apic_read(APIC_LVTT);
91 apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
92 v = apic_read(APIC_LVT0);
93 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
94 v = apic_read(APIC_LVT1);
95 apic_write_around(APIC_LVT1, v | APIC_LVT_MASKED);
96 if (maxlvt >= 4) {
97 v = apic_read(APIC_LVTPC);
98 apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
99 }
100
101 /*
102 * Clean APIC state for other OSs:
103 */
104 apic_write_around(APIC_LVTT, APIC_LVT_MASKED);
105 apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
106 apic_write_around(APIC_LVT1, APIC_LVT_MASKED);
107 if (maxlvt >= 3)
108 apic_write_around(APIC_LVTERR, APIC_LVT_MASKED);
109 if (maxlvt >= 4)
110 apic_write_around(APIC_LVTPC, APIC_LVT_MASKED);
111 v = GET_APIC_VERSION(apic_read(APIC_LVR));
112 if (APIC_INTEGRATED(v)) { /* !82489DX */
113 if (maxlvt > 3) /* Due to Pentium errata 3AP and 11AP. */
114 apic_write(APIC_ESR, 0);
115 apic_read(APIC_ESR);
116 }
117}
118
119void __init connect_bsp_APIC(void)
120{
121 if (pic_mode) {
122 /*
123 * Do not trust the local APIC being empty at bootup.
124 */
125 clear_local_APIC();
126 /*
127 * PIC mode, enable APIC mode in the IMCR, i.e.
128 * connect BSP's local APIC to INT and NMI lines.
129 */
130 apic_printk(APIC_VERBOSE, "leaving PIC mode, enabling APIC mode.\n");
131 outb(0x70, 0x22);
132 outb(0x01, 0x23);
133 }
134}
135
136void disconnect_bsp_APIC(void)
137{
138 if (pic_mode) {
139 /*
140 * Put the board back into PIC mode (has an effect
141 * only on certain older boards). Note that APIC
142 * interrupts, including IPIs, won't work beyond
143 * this point! The only exception are INIT IPIs.
144 */
145 apic_printk(APIC_QUIET, "disabling APIC mode, entering PIC mode.\n");
146 outb(0x70, 0x22);
147 outb(0x00, 0x23);
148 }
149}
150
151void disable_local_APIC(void)
152{
153 unsigned int value;
154
155 clear_local_APIC();
156
157 /*
158 * Disable APIC (implies clearing of registers
159 * for 82489DX!).
160 */
161 value = apic_read(APIC_SPIV);
162 value &= ~APIC_SPIV_APIC_ENABLED;
163 apic_write_around(APIC_SPIV, value);
164}
165
166/*
167 * This is to verify that we're looking at a real local APIC.
168 * Check these against your board if the CPUs aren't getting
169 * started for no apparent reason.
170 */
171int __init verify_local_APIC(void)
172{
173 unsigned int reg0, reg1;
174
175 /*
176 * The version register is read-only in a real APIC.
177 */
178 reg0 = apic_read(APIC_LVR);
179 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
180 apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
181 reg1 = apic_read(APIC_LVR);
182 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
183
184 /*
185 * The two version reads above should print the same
186 * numbers. If the second one is different, then we
187 * poke at a non-APIC.
188 */
189 if (reg1 != reg0)
190 return 0;
191
192 /*
193 * Check if the version looks reasonable.
194 */
195 reg1 = GET_APIC_VERSION(reg0);
196 if (reg1 == 0x00 || reg1 == 0xff)
197 return 0;
198 reg1 = get_maxlvt();
199 if (reg1 < 0x02 || reg1 == 0xff)
200 return 0;
201
202 /*
203 * The ID register is read/write in a real APIC.
204 */
205 reg0 = apic_read(APIC_ID);
206 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
207 apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
208 reg1 = apic_read(APIC_ID);
209 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
210 apic_write(APIC_ID, reg0);
211 if (reg1 != (reg0 ^ APIC_ID_MASK))
212 return 0;
213
214 /*
215 * The next two are just to see if we have sane values.
216 * They're only really relevant if we're in Virtual Wire
217 * compatibility mode, which most boxes are these days.
218 */
219 reg0 = apic_read(APIC_LVT0);
220 apic_printk(APIC_DEBUG,"Getting LVT0: %x\n", reg0);
221 reg1 = apic_read(APIC_LVT1);
222 apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
223
224 return 1;
225}
226
227void __init sync_Arb_IDs(void)
228{
229 /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
230 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
231 if (ver >= 0x14) /* P4 or higher */
232 return;
233
234 /*
235 * Wait for idle.
236 */
237 apic_wait_icr_idle();
238
239 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
240 apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
241 | APIC_DM_INIT);
242}
243
244extern void __error_in_apic_c (void);
245
246/*
247 * An initial setup of the virtual wire mode.
248 */
249void __init init_bsp_APIC(void)
250{
251 unsigned int value, ver;
252
253 /*
254 * Don't do the setup now if we have a SMP BIOS as the
255 * through-I/O-APIC virtual wire mode might be active.
256 */
257 if (smp_found_config || !cpu_has_apic)
258 return;
259
260 value = apic_read(APIC_LVR);
261 ver = GET_APIC_VERSION(value);
262
263 /*
264 * Do not trust the local APIC being empty at bootup.
265 */
266 clear_local_APIC();
267
268 /*
269 * Enable APIC.
270 */
271 value = apic_read(APIC_SPIV);
272 value &= ~APIC_VECTOR_MASK;
273 value |= APIC_SPIV_APIC_ENABLED;
274 value |= APIC_SPIV_FOCUS_DISABLED;
275 value |= SPURIOUS_APIC_VECTOR;
276 apic_write_around(APIC_SPIV, value);
277
278 /*
279 * Set up the virtual wire mode.
280 */
281 apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
282 value = APIC_DM_NMI;
283 if (!APIC_INTEGRATED(ver)) /* 82489DX */
284 value |= APIC_LVT_LEVEL_TRIGGER;
285 apic_write_around(APIC_LVT1, value);
286}
287
288void __init setup_local_APIC (void)
289{
290 unsigned int value, ver, maxlvt;
291
292 /* Pound the ESR really hard over the head with a big hammer - mbligh */
293 if (esr_disable) {
294 apic_write(APIC_ESR, 0);
295 apic_write(APIC_ESR, 0);
296 apic_write(APIC_ESR, 0);
297 apic_write(APIC_ESR, 0);
298 }
299
300 value = apic_read(APIC_LVR);
301 ver = GET_APIC_VERSION(value);
302
303 if ((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f)
304 __error_in_apic_c();
305
306 /*
307 * Double-check whether this APIC is really registered.
308 * This is meaningless in clustered apic mode, so we skip it.
309 */
310 if (!apic_id_registered())
311 BUG();
312
313 /*
314 * Intel recommends setting DFR, LDR and TPR before enabling
315 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
316 * document number 292116). So here it goes...
317 */
318 init_apic_ldr();
319
320 /*
321 * Set Task Priority to 'accept all'. We never change this
322 * later on.
323 */
324 value = apic_read(APIC_TASKPRI);
325 value &= ~APIC_TPRI_MASK;
326 apic_write_around(APIC_TASKPRI, value);
327
328 /*
329 * Now that we are all set up, enable the APIC
330 */
331 value = apic_read(APIC_SPIV);
332 value &= ~APIC_VECTOR_MASK;
333 /*
334 * Enable APIC
335 */
336 value |= APIC_SPIV_APIC_ENABLED;
337
338 /*
339 * Some unknown Intel IO/APIC (or APIC) errata is biting us with
340 * certain networking cards. If high frequency interrupts are
341 * happening on a particular IOAPIC pin, plus the IOAPIC routing
342 * entry is masked/unmasked at a high rate as well then sooner or
343 * later IOAPIC line gets 'stuck', no more interrupts are received
344 * from the device. If focus CPU is disabled then the hang goes
345 * away, oh well :-(
346 *
347 * [ This bug can be reproduced easily with a level-triggered
348 * PCI Ne2000 networking cards and PII/PIII processors, dual
349 * BX chipset. ]
350 */
351 /*
352 * Actually disabling the focus CPU check just makes the hang less
353 * frequent as it makes the interrupt distribution model more
354 * like LRU than MRU (the short-term load is more even across CPUs).
355 * See also the comment in end_level_ioapic_irq(). --macro
356 */
357#if 1
358 /* Enable focus processor (bit==0) */
359 value &= ~APIC_SPIV_FOCUS_DISABLED;
360#else
361 /* Disable focus processor (bit==1) */
362 value |= APIC_SPIV_FOCUS_DISABLED;
363#endif
364 /*
365 * Set spurious IRQ vector
366 */
367 value |= SPURIOUS_APIC_VECTOR;
368 apic_write_around(APIC_SPIV, value);
369
370 /*
371 * Set up LVT0, LVT1:
372 *
373 * set up through-local-APIC on the BP's LINT0. This is not
374 * strictly necessary in pure symmetric-IO mode, but sometimes
375 * we delegate interrupts to the 8259A.
376 */
377 /*
378 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
379 */
380 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
381 if (!smp_processor_id() && (pic_mode || !value)) {
382 value = APIC_DM_EXTINT;
383 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", smp_processor_id());
384 } else {
385 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
386 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", smp_processor_id());
387 }
388 apic_write_around(APIC_LVT0, value);
389
390 /*
391 * only the BP should see the LINT1 NMI signal, obviously.
392 */
393 if (!smp_processor_id())
394 value = APIC_DM_NMI;
395 else
396 value = APIC_DM_NMI | APIC_LVT_MASKED;
397 if (!APIC_INTEGRATED(ver)) /* 82489DX */
398 value |= APIC_LVT_LEVEL_TRIGGER;
399 apic_write_around(APIC_LVT1, value);
400
401 if (APIC_INTEGRATED(ver) && !esr_disable) { /* !82489DX */
402 unsigned oldvalue;
403 maxlvt = get_maxlvt();
404 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
405 apic_write(APIC_ESR, 0);
406 oldvalue = apic_read(APIC_ESR);
407 value = ERROR_APIC_VECTOR; // enables sending errors
408 apic_write_around(APIC_LVTERR, value);
409 /*
410 * spec says clear errors after enabling vector.
411 */
412 if (maxlvt > 3)
413 apic_write(APIC_ESR, 0);
414 value = apic_read(APIC_ESR);
415 if (value != oldvalue)
416 apic_printk(APIC_VERBOSE,
417 "ESR value after enabling vector: %08x, after %08x\n",
418 oldvalue, value);
419 } else {
420 if (esr_disable)
421 /*
422 * Something untraceable is creating bad interrupts on
423 * secondary quads ... for the moment, just leave the
424 * ESR disabled - we can't do anything useful with the
425 * errors anyway - mbligh
426 */
427 apic_printk(APIC_DEBUG, "Leaving ESR disabled.\n");
428 else
429 apic_printk(APIC_DEBUG, "No ESR for 82489DX.\n");
430 }
431
432 nmi_watchdog_default();
433 if (nmi_watchdog == NMI_LOCAL_APIC)
434 setup_apic_nmi_watchdog();
435 apic_pm_activate();
436}
437
438#ifdef CONFIG_PM
439
440static struct {
441 /* 'active' is true if the local APIC was enabled by us and
442 not the BIOS; this signifies that we are also responsible
443 for disabling it before entering apm/acpi suspend */
444 int active;
445 /* r/w apic fields */
446 unsigned int apic_id;
447 unsigned int apic_taskpri;
448 unsigned int apic_ldr;
449 unsigned int apic_dfr;
450 unsigned int apic_spiv;
451 unsigned int apic_lvtt;
452 unsigned int apic_lvtpc;
453 unsigned int apic_lvt0;
454 unsigned int apic_lvt1;
455 unsigned int apic_lvterr;
456 unsigned int apic_tmict;
457 unsigned int apic_tdcr;
458 unsigned int apic_thmr;
459} apic_pm_state;
460
461static int lapic_suspend(struct sys_device *dev, pm_message_t state)
462{
463 unsigned long flags;
464
465 if (!apic_pm_state.active)
466 return 0;
467
468 apic_pm_state.apic_id = apic_read(APIC_ID);
469 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
470 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
471 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
472 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
473 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
474 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
475 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
476 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
477 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
478 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
479 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
480 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
481 local_save_flags(flags);
482 local_irq_disable();
483 disable_local_APIC();
484 local_irq_restore(flags);
485 return 0;
486}
487
488static int lapic_resume(struct sys_device *dev)
489{
490 unsigned int l, h;
491 unsigned long flags;
492
493 if (!apic_pm_state.active)
494 return 0;
495
496 /* XXX: Pavel needs this for S3 resume, but can't explain why */
497 set_fixmap_nocache(FIX_APIC_BASE, APIC_DEFAULT_PHYS_BASE);
498
499 local_irq_save(flags);
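	/* Re-enable the local APIC at the default physical base via the APIC base MSR. */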
500 rdmsr(MSR_IA32_APICBASE, l, h);
501 l &= ~MSR_IA32_APICBASE_BASE;
502 l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
503 wrmsr(MSR_IA32_APICBASE, l, h);
504 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
505 apic_write(APIC_ID, apic_pm_state.apic_id);
506 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
507 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
508 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
509 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
510 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
511 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
512 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
513 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
514 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
515 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
516 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
517 apic_write(APIC_ESR, 0);
518 apic_read(APIC_ESR);
519 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
520 apic_write(APIC_ESR, 0);
521 apic_read(APIC_ESR);
522 local_irq_restore(flags);
523 return 0;
524}
525
526static struct sysdev_class lapic_sysclass = {
527 set_kset_name("lapic"),
528 .resume = lapic_resume,
529 .suspend = lapic_suspend,
530};
531
532static struct sys_device device_lapic = {
533 .id = 0,
534 .cls = &lapic_sysclass,
535};
536
537static void __init apic_pm_activate(void)
538{
539 apic_pm_state.active = 1;
540}
541
542static int __init init_lapic_sysfs(void)
543{
544 int error;
545 if (!cpu_has_apic)
546 return 0;
547 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
548 error = sysdev_class_register(&lapic_sysclass);
549 if (!error)
550 error = sysdev_register(&device_lapic);
551 return error;
552}
553device_initcall(init_lapic_sysfs);
554
555#else /* CONFIG_PM */
556
557static void apic_pm_activate(void) { }
558
559#endif /* CONFIG_PM */
560
561static int __init apic_set_verbosity(char *str)
562{
563 if (strcmp("debug", str) == 0)
564 apic_verbosity = APIC_DEBUG;
565 else if (strcmp("verbose", str) == 0)
566 apic_verbosity = APIC_VERBOSE;
567 else
568 printk(KERN_WARNING "APIC Verbosity level %s not recognised,"
569 " use apic=verbose or apic=debug\n", str);
570
571 return 0;
572}
573
574__setup("apic=", apic_set_verbosity);
575
576/*
577 * Detect and enable local APICs on non-SMP boards.
578 * Original code written by Keir Fraser.
579 * On AMD64 we trust the BIOS - if it says no APIC it is likely
580 * not correctly set up (usually the APIC timer won't work etc.)
581 */
582
583static int __init detect_init_APIC (void)
584{
585 if (!cpu_has_apic) {
586 printk(KERN_INFO "No local APIC present\n");
587 return -1;
588 }
589
590 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
591 boot_cpu_id = 0;
592 return 0;
593}
594
595void __init init_apic_mappings(void)
596{
597 unsigned long apic_phys;
598
599 /*
600 * If no local APIC can be found then set up a fake all
601 * zeroes page to simulate the local APIC and another
602 * one for the IO-APIC.
603 */
604 if (!smp_found_config && detect_init_APIC()) {
605 apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
606 apic_phys = __pa(apic_phys);
607 } else
608 apic_phys = mp_lapic_addr;
609
610 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
611 apic_printk(APIC_VERBOSE,"mapped APIC to %16lx (%16lx)\n", APIC_BASE, apic_phys);
612
613 /*
614 * Fetch the APIC ID of the BSP in case we have a
615 * default configuration (or the MP table is broken).
616 */
617 if (boot_cpu_id == -1U)
618 boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
619
620#ifdef CONFIG_X86_IO_APIC
621 {
622 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
623 int i;
624
625 for (i = 0; i < nr_ioapics; i++) {
626 if (smp_found_config) {
627 ioapic_phys = mp_ioapics[i].mpc_apicaddr;
628 } else {
629 ioapic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
630 ioapic_phys = __pa(ioapic_phys);
631 }
632 set_fixmap_nocache(idx, ioapic_phys);
633 apic_printk(APIC_VERBOSE,"mapped IOAPIC to %016lx (%016lx)\n",
634 __fix_to_virt(idx), ioapic_phys);
635 idx++;
636 }
637 }
638#endif
639}
640
641/*
642 * This function sets up the local APIC timer, with a timeout of
643 * 'clocks' APIC bus clock. During calibration we actually call
644 * this function twice on the boot CPU, once with a bogus timeout
645 * value, second time for real. The other (noncalibrating) CPUs
646 * call this function only once, with the real, calibrated value.
647 *
648 * We do reads before writes even if unnecessary, to get around the
649 * P5 APIC double write bug.
650 */
651
652#define APIC_DIVISOR 16
653
654static void __setup_APIC_LVTT(unsigned int clocks)
655{
656 unsigned int lvtt_value, tmp_value, ver;
657
658 ver = GET_APIC_VERSION(apic_read(APIC_LVR));
659 lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
660 if (!APIC_INTEGRATED(ver))
661 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
662 apic_write_around(APIC_LVTT, lvtt_value);
663
664 /*
665 * Divide PICLK by 16
666 */
667 tmp_value = apic_read(APIC_TDCR);
668 apic_write_around(APIC_TDCR, (tmp_value
669 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
670 | APIC_TDR_DIV_16);
671
672 apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
673}
674
675static void setup_APIC_timer(unsigned int clocks)
676{
677 unsigned long flags;
678
679 local_irq_save(flags);
680
681 /* For some reason this doesn't work on Simics, so fake it for now */
682 if (!strstr(boot_cpu_data.x86_model_id, "Screwdriver")) {
683 __setup_APIC_LVTT(clocks);
684 return;
685 }
686
687 /* wait for irq slice */
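	/*
	 * Wait for the current tick period to end (the HPET main counter
	 * reaching the timer 0 comparator, or the PIT channel 0 counter
	 * wrapping) so the APIC timer starts on a tick edge and local
	 * timer interrupts line up with the system tick.
	 */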
688 if (vxtime.hpet_address) {
689 int trigger = hpet_readl(HPET_T0_CMP);
690 while (hpet_readl(HPET_COUNTER) >= trigger)
691 /* do nothing */ ;
692 while (hpet_readl(HPET_COUNTER) < trigger)
693 /* do nothing */ ;
694 } else {
695 int c1, c2;
696 outb_p(0x00, 0x43);
697 c2 = inb_p(0x40);
698 c2 |= inb_p(0x40) << 8;
699 do {
700 c1 = c2;
701 outb_p(0x00, 0x43);
702 c2 = inb_p(0x40);
703 c2 |= inb_p(0x40) << 8;
704 } while (c2 - c1 < 300);
705 }
706
707 __setup_APIC_LVTT(clocks);
708
709 local_irq_restore(flags);
710}
711
712/*
713 * In this function we calibrate APIC bus clocks to the external
714 * timer. Unfortunately we cannot use jiffies and the timer irq
715 * to calibrate, since some later bootup code depends on getting
716 * the first irq? Ugh.
717 *
718 * We want to do the calibration only once since we
719 * want to have local timer irqs in sync. CPUs connected
720 * by the same APIC bus have the very same bus frequency.
721 * And we want to have irqs off anyways, no accidental
722 * APIC irq that way.
723 */
724
725#define TICK_COUNT 100000000
726
727static int __init calibrate_APIC_clock(void)
728{
729 int apic, apic_start, tsc, tsc_start;
730 int result;
731 /*
732 * Put whatever arbitrary (but long enough) timeout
733 * value into the APIC clock, we just want to get the
734 * counter running for calibration.
735 */
736 __setup_APIC_LVTT(1000000000);
737
738 apic_start = apic_read(APIC_TMCCT);
739 rdtscl(tsc_start);
740
741 do {
742 apic = apic_read(APIC_TMCCT);
743 rdtscl(tsc);
744 } while ((tsc - tsc_start) < TICK_COUNT && (apic - apic_start) < TICK_COUNT);
745
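	/*
	 * The APIC timer counts down, so apic_start - apic is the number
	 * of (divided) APIC timer ticks that elapsed while tsc - tsc_start
	 * TSC cycles went by; with cpu_khz * 1000 TSC cycles per second,
	 * this gives APIC timer ticks per second.
	 */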
746 result = (apic_start - apic) * 1000L * cpu_khz / (tsc - tsc_start);
747
748 printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
749 result / 1000 / 1000, result / 1000 % 1000);
750
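	/* Scale back up by APIC_DIVISOR (the /16 prescaler) and convert to bus clocks per jiffy. */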
751 return result * APIC_DIVISOR / HZ;
752}
753
754static unsigned int calibration_result;
755
756void __init setup_boot_APIC_clock (void)
757{
758 if (disable_apic_timer) {
759 printk(KERN_INFO "Disabling APIC timer\n");
760 return;
761 }
762
763 printk(KERN_INFO "Using local APIC timer interrupts.\n");
764 using_apic_timer = 1;
765
766 local_irq_disable();
767
768 calibration_result = calibrate_APIC_clock();
769 /*
770 * Now set up the timer for real.
771 */
772 setup_APIC_timer(calibration_result);
773
774 local_irq_enable();
775}
776
777void __init setup_secondary_APIC_clock(void)
778{
779 local_irq_disable(); /* FIXME: Do we need this? --RR */
780 setup_APIC_timer(calibration_result);
781 local_irq_enable();
782}
783
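/*
 * disable_APIC_timer()/enable_APIC_timer() mask and unmask the timer
 * LVT entry; the programmed count is left untouched.
 */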
784void __init disable_APIC_timer(void)
785{
786 if (using_apic_timer) {
787 unsigned long v;
788
789 v = apic_read(APIC_LVTT);
790 apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
791 }
792}
793
794void enable_APIC_timer(void)
795{
796 if (using_apic_timer) {
797 unsigned long v;
798
799 v = apic_read(APIC_LVTT);
800 apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED);
801 }
802}
803
804/*
805 * the frequency of the profiling timer can be changed
806 * by writing a multiplier value into /proc/profile.
807 */
808int setup_profiling_timer(unsigned int multiplier)
809{
810 int i;
811
812 /*
813 * Sanity check. [at least 500 APIC cycles should be
814 * between APIC interrupts as a rule of thumb, to avoid
815 * irqs flooding us]
816 */
817 if ( (!multiplier) || (calibration_result/multiplier < 500))
818 return -EINVAL;
819
820 /*
821 * Set the new multiplier for each CPU. CPUs don't start using the
822 * new values until the next timer interrupt in which they do process
823 * accounting. At that time they also adjust their APIC timers
824 * accordingly.
825 */
826 for (i = 0; i < NR_CPUS; ++i)
827 per_cpu(prof_multiplier, i) = multiplier;
828
829 return 0;
830}
831
832#undef APIC_DIVISOR
833
834/*
835 * Local timer interrupt handler. It does both profiling and
836 * process statistics/rescheduling.
837 *
838 * We do profiling in every local tick, statistics/rescheduling
839 * happen only every 'profiling multiplier' ticks. The default
840 * multiplier is 1 and it can be changed by writing the new multiplier
841 * value into /proc/profile.
842 */
843
844void smp_local_timer_interrupt(struct pt_regs *regs)
845{
846 int cpu = smp_processor_id();
847
848 profile_tick(CPU_PROFILING, regs);
849 if (--per_cpu(prof_counter, cpu) <= 0) {
850 /*
851 * The multiplier may have changed since the last time we got
852 * to this point as a result of the user writing to
853 * /proc/profile. In this case we need to adjust the APIC
854 * timer accordingly.
855 *
856 * Interrupts are already masked off at this point.
857 */
858 per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
859 if (per_cpu(prof_counter, cpu) !=
860 per_cpu(prof_old_multiplier, cpu)) {
861 __setup_APIC_LVTT(calibration_result/
862 per_cpu(prof_counter, cpu));
863 per_cpu(prof_old_multiplier, cpu) =
864 per_cpu(prof_counter, cpu);
865 }
866
867#ifdef CONFIG_SMP
868 update_process_times(user_mode(regs));
869#endif
870 }
871
872 /*
873 * We take the 'long' return path, and there every subsystem
874 * grabs the appropriate locks (kernel lock/ irq lock).
875 *
876 * we might want to decouple profiling from the 'long path',
877 * and do the profiling totally in assembly.
878 *
879 * Currently this isn't too much of an issue (performance wise),
880 * we can take more than 100K local irqs per second on a 100 MHz P5.
881 */
882}
883
884/*
885 * Local APIC timer interrupt. This is the most natural way for doing
886 * local interrupts, but local timer interrupts can be emulated by
887 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
888 *
889 * [ if a single-CPU system runs an SMP kernel then we call the local
890 * interrupt as well. Thus we cannot inline the local irq ... ]
891 */
892void smp_apic_timer_interrupt(struct pt_regs *regs)
893{
894 /*
895 * the NMI deadlock-detector uses this.
896 */
897 add_pda(apic_timer_irqs, 1);
898
899 /*
900 * NOTE! We'd better ACK the irq immediately,
901 * because timer handling can be slow.
902 */
903 ack_APIC_irq();
904 /*
905 * update_process_times() expects us to have done irq_enter().
906 * Besides, if we don't, timer interrupts ignore the global
907 * interrupt lock, which is the WrongThing (tm) to do.
908 */
909 irq_enter();
910 smp_local_timer_interrupt(regs);
911 irq_exit();
912}
913
914/*
915 * oem_force_hpet_timer -- force HPET mode for some boxes.
916 *
917 * Thus far, the major user of this is IBM's Summit2 series:
918 *
919 * Clustered boxes may have unsynced TSC problems if they are
920 * multi-chassis. Use available data to take a good guess.
921 * If in doubt, go HPET.
922 */
923__init int oem_force_hpet_timer(void)
924{
925 int i, clusters, zeros;
926 unsigned id;
927 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
928
929 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
930
931 for (i = 0; i < NR_CPUS; i++) {
932 id = bios_cpu_apicid[i];
933 if (id != BAD_APICID)
934 __set_bit(APIC_CLUSTERID(id), clustermap);
935 }
936
937 /* Problem: Partially populated chassis may not have CPUs in some of
938 * the APIC clusters they have been allocated. Only present CPUs have
939 * bios_cpu_apicid entries, thus causing zeroes in the bitmap. Since
940 * clusters are allocated sequentially, count zeros only if they are
941 * bounded by ones.
942 */
943 clusters = 0;
944 zeros = 0;
945 for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
946 if (test_bit(i, clustermap)) {
947 clusters += 1 + zeros;
948 zeros = 0;
949 } else
950 ++zeros;
951 }
952
953 /*
954 * If clusters > 2, then should be multi-chassis. Return 1 for HPET.
955 * Else return 0 to use TSC.
956 * May have to revisit this when multi-core + hyperthreaded CPUs come
957 * out, but AFAIK this will work even for them.
958 */
959 return (clusters > 2);
960}
961
962/*
963 * This interrupt should _never_ happen with our APIC/SMP architecture
964 */
965asmlinkage void smp_spurious_interrupt(void)
966{
967 unsigned int v;
968 irq_enter();
969 /*
970 * Check if this really is a spurious interrupt and ACK it
971 * if it is a vectored one. Just in case...
972 * Spurious interrupts should not be ACKed.
973 */
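	/*
	 * The ISR is 256 bits wide, stored as eight 32-bit words spaced
	 * 0x10 bytes apart; (vector & ~0x1f) >> 1 is the byte offset of
	 * the word holding this vector's bit.
	 */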
974 v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
975 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
976 ack_APIC_irq();
977
978#if 0
979 static unsigned long last_warning;
980 static unsigned long skipped;
981
982 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
983 if (time_before(last_warning+30*HZ,jiffies)) {
984 printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
985 smp_processor_id(), skipped);
986 last_warning = jiffies;
987 skipped = 0;
988 } else {
989 skipped++;
990 }
991#endif
992 irq_exit();
993}
994
995/*
996 * This interrupt should never happen with our APIC/SMP architecture
997 */
998
999asmlinkage void smp_error_interrupt(void)
1000{
1001 unsigned int v, v1;
1002
1003 irq_enter();
1004 /* First tickle the hardware, only then report what went on. -- REW */
1005 v = apic_read(APIC_ESR);
1006 apic_write(APIC_ESR, 0);
1007 v1 = apic_read(APIC_ESR);
1008 ack_APIC_irq();
1009 atomic_inc(&irq_err_count);
1010
1011 /* Here is what the APIC error bits mean:
1012 0: Send CS error
1013 1: Receive CS error
1014 2: Send accept error
1015 3: Receive accept error
1016 4: Reserved
1017 5: Send illegal vector
1018 6: Received illegal vector
1019 7: Illegal register address
1020 */
1021 printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
1022 smp_processor_id(), v , v1);
1023 irq_exit();
1024}
1025
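/*
 * Set by the "disableapic"/"nolapic" boot options below (and when no
 * APIC is present); non-zero means don't touch the APIC at all.
 */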
1026int disable_apic;
1027
1028/*
1029 * This initializes the IO-APIC and APIC hardware if this is
1030 * a UP kernel.
1031 */
1032int __init APIC_init_uniprocessor (void)
1033{
1034 if (disable_apic) {
1035 printk(KERN_INFO "Apic disabled\n");
1036 return -1;
1037 }
1038 if (!cpu_has_apic) {
1039 disable_apic = 1;
1040 printk(KERN_INFO "Apic disabled by BIOS\n");
1041 return -1;
1042 }
1043
1044 verify_local_APIC();
1045
1046 connect_bsp_APIC();
1047
1048 phys_cpu_present_map = physid_mask_of_physid(0);
1049 apic_write_around(APIC_ID, boot_cpu_id);
1050
1051 setup_local_APIC();
1052
1053#ifdef CONFIG_X86_IO_APIC
1054 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1055 setup_IO_APIC();
1056 else
1057 nr_ioapics = 0;
1058#endif
1059 setup_boot_APIC_clock();
1060 check_nmi_watchdog();
1061 return 0;
1062}
1063
1064static __init int setup_disableapic(char *str)
1065{
1066 disable_apic = 1;
1067 return 0;
1068}
1069
1070static __init int setup_nolapic(char *str)
1071{
1072 disable_apic = 1;
1073 return 0;
1074}
1075
1076static __init int setup_noapictimer(char *str)
1077{
1078 disable_apic_timer = 1;
1079 return 0;
1080}
1081
1082/* dummy parsing: see setup.c */
1083
1084__setup("disableapic", setup_disableapic);
1085__setup("nolapic", setup_nolapic); /* same as disableapic, for compatibility */
1086
1087__setup("noapictimer", setup_noapictimer);
1088
1089/* no "lapic" flag - we only use the lapic when the BIOS tells us so. */