arch/x86/kernel/apic/apic.c
/*
 * Local APIC handling, local APIC timers
 *
 * (c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 * Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *				thanks to Eric Gilmore
 *				and Rolf G. Tews
 *				for testing these extensively.
 * Maciej W. Rozycki	:	Various updates and fixes.
 * Mikael Pettersson	:	Power Management for UP-APIC.
 * Pavel Machek and
 * Mikael Pettersson	:	PM converted to driver model.
 */

#include <linux/perf_event.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi_pmtmr.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/ftrace.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/i8253.h>
#include <linux/dmar.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/irq_remapping.h>
#include <asm/perf_event.h>
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <linux/atomic.h>
#include <asm/mpspec.h>
#include <asm/i8259.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/idle.h>
#include <asm/mtrr.h>
#include <asm/time.h>
#include <asm/smp.h>
#include <asm/mce.h>
#include <asm/tsc.h>
#include <asm/hypervisor.h>

unsigned int num_processors;

unsigned disabled_cpus __cpuinitdata;

/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;

/*
 * The highest APIC ID seen during enumeration.
 */
unsigned int max_physical_apicid;

/*
 * Bitmask of physically existing CPUs:
 */
physid_mask_t phys_cpu_present_map;

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#ifdef CONFIG_X86_32

/*
 * On x86_32, the mapping between cpu and logical apicid may vary
 * depending on apic in use. The following early percpu variable is
 * used for the mapping. This is where the behaviors of x86_64 and 32
 * actually diverge. Let's keep it ugly for now.
 */
DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);

/* Local APIC was disabled by the BIOS and enabled by the kernel */
static int enabled_via_apicbase;

/*
 * Handle interrupt mode configuration register (IMCR).
 * This register controls whether the interrupt signals
 * that reach the BSP come from the master PIC or from the
 * local APIC. Before entering Symmetric I/O Mode, either
 * the BIOS or the operating system must switch out of
 * PIC Mode by changing the IMCR.
 */
static inline void imcr_pic_to_apic(void)
{
	/* select IMCR register */
	outb(0x70, 0x22);
	/* NMI and 8259 INTR go through APIC */
	outb(0x01, 0x23);
}

static inline void imcr_apic_to_pic(void)
{
	/* select IMCR register */
	outb(0x70, 0x22);
	/* NMI and 8259 INTR go directly to BSP */
	outb(0x00, 0x23);
}
#endif

/*
 * Knob to control our willingness to enable the local APIC.
 *
 * +1=force-enable
 */
static int force_enable_local_apic __initdata;
/*
 * APIC command line parameters
 */
static int __init parse_lapic(char *arg)
{
	if (config_enabled(CONFIG_X86_32) && !arg)
		force_enable_local_apic = 1;
	else if (!strncmp(arg, "notscdeadline", 13))
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
	return 0;
}
early_param("lapic", parse_lapic);

#ifdef CONFIG_X86_64
static int apic_calibrate_pmtmr __initdata;
static __init int setup_apicpmtimer(char *s)
{
	apic_calibrate_pmtmr = 1;
	notsc_setup(NULL);
	return 0;
}
__setup("apicpmtimer", setup_apicpmtimer);
#endif

int x2apic_mode;
#ifdef CONFIG_X86_X2APIC
/* x2apic enabled before OS handover */
int x2apic_preenabled;
static int x2apic_disabled;
static int nox2apic;
static __init int setup_nox2apic(char *str)
{
	if (x2apic_enabled()) {
		int apicid = native_apic_msr_read(APIC_ID);

		if (apicid >= 255) {
			pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
				   apicid);
			return 0;
		}

		pr_warning("x2apic already enabled. will disable it\n");
	} else
		setup_clear_cpu_cap(X86_FEATURE_X2APIC);

	nox2apic = 1;

	return 0;
}
early_param("nox2apic", setup_nox2apic);
#endif

unsigned long mp_lapic_addr;
int disable_apic;
/* Disable local APIC timer from the kernel commandline or via dmi quirk */
static int disable_apic_timer __initdata;
/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);

int first_system_vector = 0xfe;

/*
 * Debug level, exported for io_apic.c
 */
unsigned int apic_verbosity;

int pic_mode;

/* Have we found an MP table */
int smp_found_config;

static struct resource lapic_resource = {
	.name = "Local APIC",
	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

unsigned int lapic_timer_frequency = 0;

static void apic_pm_activate(void);

static unsigned long apic_phys;

/*
 * Get the LAPIC version
 */
static inline int lapic_get_version(void)
{
	return GET_APIC_VERSION(apic_read(APIC_LVR));
}

/*
 * Check if the APIC is integrated or a separate chip
 */
static inline int lapic_is_integrated(void)
{
#ifdef CONFIG_X86_64
	return 1;
#else
	return APIC_INTEGRATED(lapic_get_version());
#endif
}

/*
 * Check whether this is a modern or a first-generation APIC
 */
static int modern_apic(void)
{
	/* AMD systems use old APIC versions, so check the CPU */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 >= 0xf)
		return 1;
	return lapic_get_version() >= 0x14;
}

/*
 * Right after this call the apic becomes NOOP-driven,
 * so apic->write/read don't do anything.
 */
static void __init apic_disable(void)
{
	pr_info("APIC: switched to apic NOOP\n");
	apic = &apic_noop;
}

void native_apic_wait_icr_idle(void)
{
	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

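/*
 * Bounded variant of the wait above: polls the ICR busy bit in 100 us
 * steps for at most 1000 iterations (roughly 100 ms) and counts each
 * retry in the icr_read_retry_count irq statistic. Returns the last
 * busy status, i.e. 0 when the ICR went idle in time.
 */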
u32 native_safe_apic_wait_icr_idle(void)
{
	u32 send_status;
	int timeout;

	timeout = 0;
	do {
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		if (!send_status)
			break;
		inc_irq_stat(icr_read_retry_count);
		udelay(100);
	} while (timeout++ < 1000);

	return send_status;
}

void native_apic_icr_write(u32 low, u32 id)
{
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
	apic_write(APIC_ICR, low);
}

u64 native_apic_icr_read(void)
{
	u32 icr1, icr2;

	icr2 = apic_read(APIC_ICR2);
	icr1 = apic_read(APIC_ICR);

	return icr1 | ((u64)icr2 << 32);
}

#ifdef CONFIG_X86_32
/**
 * get_physical_broadcast - Get number of physical broadcast IDs
 */
int get_physical_broadcast(void)
{
	return modern_apic() ? 0xff : 0xf;
}
#endif

/**
 * lapic_get_maxlvt - get the maximum number of local vector table entries
 */
int lapic_get_maxlvt(void)
{
	unsigned int v;

	v = apic_read(APIC_LVR);
	/*
	 * - we always have APIC integrated on 64bit mode
	 * - 82489DXs do not report # of LVT entries
	 */
	return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
}

/*
 * Local APIC timer
 */

/* Clock divisor */
#define APIC_DIVISOR 16
#define TSC_DIVISOR  32

/*
 * This function sets up the local APIC timer, with a timeout of
 * 'clocks' APIC bus clock. During calibration we actually call
 * this function twice on the boot CPU, once with a bogus timeout
 * value, second time for real. The other (noncalibrating) CPUs
 * call this function only once, with the real, calibrated value.
 *
 * We do reads before writes even if unnecessary, to get around the
 * P5 APIC double write bug.
 */
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
	unsigned int lvtt_value, tmp_value;

	lvtt_value = LOCAL_TIMER_VECTOR;
	if (!oneshot)
		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
	else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE;

	if (!lapic_is_integrated())
		lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);

	if (!irqen)
		lvtt_value |= APIC_LVT_MASKED;

	apic_write(APIC_LVTT, lvtt_value);

	if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
		printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
		return;
	}

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write(APIC_TDCR,
		(tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
		APIC_TDR_DIV_16);

	if (!oneshot)
		apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
}

/*
 * Setup extended LVT, AMD specific
 *
 * Software should use the LVT offsets the BIOS provides. The offsets
 * are determined by the subsystems using them, such as those for MCE
 * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts
 * are supported. Beginning with family 10h at least 4 offsets are
 * available.
 *
 * Since the offsets must be consistent for all cores, we keep track
 * of the LVT offsets in software and reserve the offset for the same
 * vector also to be used on other cores. An offset is freed by
 * setting the entry to APIC_EILVT_MASKED.
 *
 * If the BIOS is right, there should be no conflicts. Otherwise a
 * "[Firmware Bug]: ..." error message is generated. However, if
 * software does not properly determine the offsets, it is not
 * necessarily a BIOS bug.
 */

static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];

static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
{
	return (old & APIC_EILVT_MASKED)
		|| (new == APIC_EILVT_MASKED)
		|| ((new & ~APIC_EILVT_MASKED) == old);
}

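/*
 * Reserve an extended-LVT offset for a given vector. The per-offset
 * bookkeeping lives in eilvt_offsets[] and is updated with a lock-free
 * atomic_cmpxchg() loop, so concurrent callers on different CPUs end
 * up agreeing on a single vector per offset without taking a lock.
 */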
static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
{
	unsigned int rsvd, vector;

	if (offset >= APIC_EILVT_NR_MAX)
		return ~0;

	rsvd = atomic_read(&eilvt_offsets[offset]);
	do {
		vector = rsvd & ~APIC_EILVT_MASKED;	/* 0: unassigned */
		if (vector && !eilvt_entry_is_changeable(vector, new))
			/* may not change if vectors are different */
			return rsvd;
		rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
	} while (rsvd != new);

	rsvd &= ~APIC_EILVT_MASKED;
	if (rsvd && rsvd != vector)
		pr_info("LVT offset %d assigned for vector 0x%02x\n",
			offset, rsvd);

	return new;
}

/*
 * If mask=1, the LVT entry does not generate interrupts while mask=0
 * enables the vector. See also the BKDGs. Must be called with
 * preemption disabled.
 */

int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
{
	unsigned long reg = APIC_EILVTn(offset);
	unsigned int new, old, reserved;

	new = (mask << 16) | (msg_type << 8) | vector;
	old = apic_read(reg);
	reserved = reserve_eilvt_offset(offset, new);

	if (reserved != new) {
		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
		       "vector 0x%x, but the register is already in use for "
		       "vector 0x%x on another cpu\n",
		       smp_processor_id(), reg, offset, new, reserved);
		return -EINVAL;
	}

	if (!eilvt_entry_is_changeable(old, new)) {
		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
		       "vector 0x%x, but the register is already in use for "
		       "vector 0x%x on this cpu\n",
		       smp_processor_id(), reg, offset, new, old);
		return -EBUSY;
	}

	apic_write(reg, new);

	return 0;
}
EXPORT_SYMBOL_GPL(setup_APIC_eilvt);

/*
 * Program the next event, relative to now
 */
static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt)
{
	apic_write(APIC_TMICT, delta);
	return 0;
}

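/*
 * TSC-deadline variant of the above: instead of programming the APIC
 * timer count, arm MSR_IA32_TSC_DEADLINE at "now + delta", with delta
 * scaled by TSC_DIVISOR to match the frequency this clockevent was
 * registered with in setup_APIC_timer().
 */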
static int lapic_next_deadline(unsigned long delta,
			       struct clock_event_device *evt)
{
	u64 tsc;

	rdtscll(tsc);
	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
	return 0;
}

/*
 * Setup the lapic timer in periodic or oneshot mode
 */
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int v;

	/* Lapic used as dummy for broadcast ? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return;

	local_irq_save(flags);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		__setup_APIC_LVTT(lapic_timer_frequency,
				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		v = apic_read(APIC_LVTT);
		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, v);
		apic_write(APIC_TMICT, 0);
		break;
	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here */
		break;
	}

	local_irq_restore(flags);
}

/*
 * Local APIC timer broadcast function
 */
static void lapic_timer_broadcast(const struct cpumask *mask)
{
#ifdef CONFIG_SMP
	apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}

/*
 * The local apic timer can be used for any function which is CPU local.
 */
static struct clock_event_device lapic_clockevent = {
	.name		= "lapic",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
			| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
	.shift		= 32,
	.set_mode	= lapic_timer_setup,
	.set_next_event	= lapic_next_event,
	.broadcast	= lapic_timer_broadcast,
	.rating		= 100,
	.irq		= -1,
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);

/*
 * Setup the local APIC timer for this CPU. Copy the initialized values
 * of the boot CPU and register the clock event in the framework.
 */
static void __cpuinit setup_APIC_timer(void)
{
	struct clock_event_device *levt = &__get_cpu_var(lapic_events);

	if (this_cpu_has(X86_FEATURE_ARAT)) {
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
		/* Make LAPIC timer preferable over percpu HPET */
		lapic_clockevent.rating = 150;
	}

	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of(smp_processor_id());

	if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
		levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
				    CLOCK_EVT_FEAT_DUMMY);
		levt->set_next_event = lapic_next_deadline;
		clockevents_config_and_register(levt,
						(tsc_khz / TSC_DIVISOR) * 1000,
						0xF, ~0UL);
	} else
		clockevents_register_device(levt);
}

/*
 * In this function we calibrate the APIC bus clocks to the external timer.
 *
 * We want to do the calibration only once, since we want to have local timer
 * irqs synchronous. CPUs connected by the same APIC bus have the very same bus
 * frequency.
 *
 * This was previously done by reading the PIT/HPET and waiting for a wrap
 * around to find out that a tick has elapsed. I have a box where the PIT
 * readout is broken, so it never gets out of the wait loop again. This was
 * also reported by others.
 *
 * Monitoring the jiffies value is inaccurate and the clockevents
 * infrastructure allows us to do a simple substitution of the interrupt
 * handler.
 *
 * The calibration routine also uses the pm_timer when possible, as the PIT
 * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
 * back to normal later in the boot process).
 */

#define LAPIC_CAL_LOOPS		(HZ/10)

static __initdata int lapic_cal_loops = -1;
static __initdata long lapic_cal_t1, lapic_cal_t2;
static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;

/*
 * Temporary interrupt handler.
 */
static void __init lapic_cal_handler(struct clock_event_device *dev)
{
	unsigned long long tsc = 0;
	long tapic = apic_read(APIC_TMCCT);
	unsigned long pm = acpi_pm_read_early();

	if (cpu_has_tsc)
		rdtscll(tsc);

	switch (lapic_cal_loops++) {
	case 0:
		lapic_cal_t1 = tapic;
		lapic_cal_tsc1 = tsc;
		lapic_cal_pm1 = pm;
		lapic_cal_j1 = jiffies;
		break;

	case LAPIC_CAL_LOOPS:
		lapic_cal_t2 = tapic;
		lapic_cal_tsc2 = tsc;
		if (pm < lapic_cal_pm1)
			pm += ACPI_PM_OVRRUN;
		lapic_cal_pm2 = pm;
		lapic_cal_j2 = jiffies;
		break;
	}
}

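/*
 * Cross-check the measured LAPIC delta against the ACPI PM timer over
 * the same ~100 ms calibration window. If the PM timer disagrees by
 * more than about 1%, rescale *delta and *deltatsc to what the PM
 * timer says; returns 0 when the PM-timer based result was used.
 */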
static int __init
calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
{
	const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
	const long pm_thresh = pm_100ms / 100;
	unsigned long mult;
	u64 res;

#ifndef CONFIG_X86_PM_TIMER
	return -1;
#endif

	apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);

	/* Check, if the PM timer is available */
	if (!deltapm)
		return -1;

	mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);

	if (deltapm > (pm_100ms - pm_thresh) &&
	    deltapm < (pm_100ms + pm_thresh)) {
		apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
		return 0;
	}

	res = (((u64)deltapm) * mult) >> 22;
	do_div(res, 1000000);
	pr_warning("APIC calibration not consistent "
		   "with PM-Timer: %ldms instead of 100ms\n", (long)res);

	/* Correct the lapic counter value */
	res = (((u64)(*delta)) * pm_100ms);
	do_div(res, deltapm);
	pr_info("APIC delta adjusted to PM-Timer: "
		"%lu (%ld)\n", (unsigned long)res, *delta);
	*delta = (long)res;

	/* Correct the tsc counter value */
	if (cpu_has_tsc) {
		res = (((u64)(*deltatsc)) * pm_100ms);
		do_div(res, deltapm);
		apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
			    "PM-Timer: %lu (%ld)\n",
			    (unsigned long)res, *deltatsc);
		*deltatsc = (long)res;
	}

	return 0;
}

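/*
 * Note on the flow below: the LAPIC counter is started at its maximum,
 * the global clock event handler is temporarily replaced so that
 * lapic_cal_handler() can sample the counter, TSC and PM timer over
 * LAPIC_CAL_LOOPS ticks, and the result is then cross-checked against
 * the PM timer (and, failing that, against jiffies).
 */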
static int __init calibrate_APIC_clock(void)
{
	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
	void (*real_handler)(struct clock_event_device *dev);
	unsigned long deltaj;
	long delta, deltatsc;
	int pm_referenced = 0;

	/**
	 * check if lapic timer has already been calibrated by platform
	 * specific routine, such as tsc calibration code. if so, we just fill
	 * in the clockevent structure and return.
	 */

	if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
		return 0;
	} else if (lapic_timer_frequency) {
		apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
			    lapic_timer_frequency);
		lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
					TICK_NSEC, lapic_clockevent.shift);
		lapic_clockevent.max_delta_ns =
			clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
		lapic_clockevent.min_delta_ns =
			clockevent_delta2ns(0xF, &lapic_clockevent);
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
		return 0;
	}

	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
		    "calibrating APIC timer ...\n");

	local_irq_disable();

	/* Replace the global interrupt handler */
	real_handler = global_clock_event->event_handler;
	global_clock_event->event_handler = lapic_cal_handler;

	/*
	 * Setup the APIC counter to maximum. There is no way the lapic
	 * can underflow in the 100ms detection time frame
	 */
	__setup_APIC_LVTT(0xffffffff, 0, 0);

	/* Let the interrupts run */
	local_irq_enable();

	while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
		cpu_relax();

	local_irq_disable();

	/* Restore the real event handler */
	global_clock_event->event_handler = real_handler;

	/* Build delta t1-t2 as apic timer counts down */
	delta = lapic_cal_t1 - lapic_cal_t2;
	apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);

	deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);

	/* we trust the PM based calibration if possible */
	pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
					&delta, &deltatsc);

	/* Calculate the scaled math multiplication factor */
	lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
				       lapic_clockevent.shift);
	lapic_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
	lapic_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &lapic_clockevent);

	lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;

	apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
	apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
	apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
		    lapic_timer_frequency);

	if (cpu_has_tsc) {
		apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
			    "%ld.%04ld MHz.\n",
			    (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
			    (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
	}

	apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
		    "%u.%04u MHz.\n",
		    lapic_timer_frequency / (1000000 / HZ),
		    lapic_timer_frequency % (1000000 / HZ));

	/*
	 * Do a sanity check on the APIC calibration result
	 */
	if (lapic_timer_frequency < (1000000 / HZ)) {
		local_irq_enable();
		pr_warning("APIC frequency too slow, disabling apic timer\n");
		return -1;
	}

	levt->features &= ~CLOCK_EVT_FEAT_DUMMY;

	/*
	 * PM timer calibration failed or not turned on
	 * so lets try APIC timer based calibration
	 */
	if (!pm_referenced) {
		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");

		/*
		 * Setup the apic timer manually
		 */
		levt->event_handler = lapic_cal_handler;
		lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
		lapic_cal_loops = -1;

		/* Let the interrupts run */
		local_irq_enable();

		while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
			cpu_relax();

		/* Stop the lapic timer */
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);

		/* Jiffies delta */
		deltaj = lapic_cal_j2 - lapic_cal_j1;
		apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);

		/* Check, if the jiffies result is consistent */
		if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
			apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
		else
			levt->features |= CLOCK_EVT_FEAT_DUMMY;
	} else
		local_irq_enable();

	if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
		pr_warning("APIC timer disabled due to verification failure\n");
		return -1;
	}

	return 0;
}

/*
 * Setup the boot APIC
 *
 * Calibrate and verify the result.
 */
void __init setup_boot_APIC_clock(void)
{
	/*
	 * The local apic timer can be disabled via the kernel
	 * commandline or from the CPU detection code. Register the lapic
	 * timer as a dummy clock event source on SMP systems, so the
	 * broadcast mechanism is used. On UP systems simply ignore it.
	 */
	if (disable_apic_timer) {
		pr_info("Disabling APIC timer\n");
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1) {
			lapic_clockevent.mult = 1;
			setup_APIC_timer();
		}
		return;
	}

	if (calibrate_APIC_clock()) {
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1)
			setup_APIC_timer();
		return;
	}

	/*
	 * If nmi_watchdog is set to IO_APIC, we need the
	 * PIT/HPET going. Otherwise register lapic as a dummy
	 * device.
	 */
	lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;

	/* Setup the lapic or request the broadcast */
	setup_APIC_timer();
}

void __cpuinit setup_secondary_APIC_clock(void)
{
	setup_APIC_timer();
}

/*
 * The guts of the apic timer interrupt
 */
static void local_apic_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	/*
	 * Normally we should not be here till LAPIC has been initialized but
	 * in some cases like kdump, it's possible that there is a pending
	 * LAPIC timer interrupt from the previous kernel's context and it is
	 * delivered in the new kernel the moment interrupts are enabled.
	 *
	 * Interrupts are enabled early and LAPIC is setup much later, hence
	 * it's possible that when we get here evt->event_handler is NULL.
	 * Check for event_handler being NULL and discard the interrupt as
	 * spurious.
	 */
	if (!evt->event_handler) {
		pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
		/* Switch it off */
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
		return;
	}

	/*
	 * the NMI deadlock-detector uses this.
	 */
	inc_irq_stat(apic_timer_irqs);

	evt->event_handler(evt);
}

/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't, timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	irq_enter();
	exit_idle();
	local_apic_timer_interrupt();
	irq_exit();

	set_irq_regs(old_regs);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*
 * Local APIC start and shutdown
 */

/**
 * clear_local_APIC - shutdown the local APIC
 *
 * This is called when a CPU is disabled and before rebooting, so the state of
 * the local APIC has no dangling leftovers. Also used to clean out any BIOS
 * leftovers during boot.
 */
void clear_local_APIC(void)
{
	int maxlvt;
	u32 v;

	/* APIC hasn't been mapped yet */
	if (!x2apic_mode && !apic_phys)
		return;

	maxlvt = lapic_get_maxlvt();
	/*
	 * Masking an LVT entry can trigger a local APIC error
	 * if the vector is zero. Mask LVTERR first to prevent this.
	 */
	if (maxlvt >= 3) {
		v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
		apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	}
	/*
	 * Careful: we have to set masks only first to deassert
	 * any level-triggered sources.
	 */
	v = apic_read(APIC_LVTT);
	apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
	v = apic_read(APIC_LVT1);
	apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
	if (maxlvt >= 4) {
		v = apic_read(APIC_LVTPC);
		apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
	}

	/* lets not touch this if we didn't frob it */
#ifdef CONFIG_X86_THERMAL_VECTOR
	if (maxlvt >= 5) {
		v = apic_read(APIC_LVTTHMR);
		apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
	}
#endif
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 6) {
		v = apic_read(APIC_LVTCMCI);
		if (!(v & APIC_LVT_MASKED))
			apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
	}
#endif

	/*
	 * Clean APIC state for other OSs:
	 */
	apic_write(APIC_LVTT, APIC_LVT_MASKED);
	apic_write(APIC_LVT0, APIC_LVT_MASKED);
	apic_write(APIC_LVT1, APIC_LVT_MASKED);
	if (maxlvt >= 3)
		apic_write(APIC_LVTERR, APIC_LVT_MASKED);
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, APIC_LVT_MASKED);

	/* Integrated APIC (!82489DX) ? */
	if (lapic_is_integrated()) {
		if (maxlvt > 3)
			/* Clear ESR due to Pentium errata 3AP and 11AP */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}
}

/**
 * disable_local_APIC - clear and disable the local APIC
 */
void disable_local_APIC(void)
{
	unsigned int value;

	/* APIC hasn't been mapped yet */
	if (!x2apic_mode && !apic_phys)
		return;

	clear_local_APIC();

	/*
	 * Disable APIC (implies clearing of registers
	 * for 82489DX!).
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_SPIV_APIC_ENABLED;
	apic_write(APIC_SPIV, value);

#ifdef CONFIG_X86_32
	/*
	 * When LAPIC was disabled by the BIOS and enabled by the kernel,
	 * restore the disabled state.
	 */
	if (enabled_via_apicbase) {
		unsigned int l, h;

		rdmsr(MSR_IA32_APICBASE, l, h);
		l &= ~MSR_IA32_APICBASE_ENABLE;
		wrmsr(MSR_IA32_APICBASE, l, h);
	}
#endif
}

/*
 * If Linux enabled the LAPIC against the BIOS default, disable it before
 * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
 * not power-off. Additionally clear all LVT entries before disable_local_APIC
 * for the case where Linux didn't enable the LAPIC.
 */
void lapic_shutdown(void)
{
	unsigned long flags;

	if (!cpu_has_apic && !apic_from_smp_config())
		return;

	local_irq_save(flags);

#ifdef CONFIG_X86_32
	if (!enabled_via_apicbase)
		clear_local_APIC();
	else
#endif
		disable_local_APIC();

	local_irq_restore(flags);
}

/*
 * This is to verify that we're looking at a real local APIC.
 * Check these against your board if the CPUs aren't getting
 * started for no apparent reason.
 */
int __init verify_local_APIC(void)
{
	unsigned int reg0, reg1;

	/*
	 * The version register is read-only in a real APIC.
	 */
	reg0 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
	apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
	reg1 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);

	/*
	 * The two version reads above should print the same
	 * numbers. If the second one is different, then we
	 * poke at a non-APIC.
	 */
	if (reg1 != reg0)
		return 0;

	/*
	 * Check if the version looks reasonable.
	 */
	reg1 = GET_APIC_VERSION(reg0);
	if (reg1 == 0x00 || reg1 == 0xff)
		return 0;
	reg1 = lapic_get_maxlvt();
	if (reg1 < 0x02 || reg1 == 0xff)
		return 0;

	/*
	 * The ID register is read/write in a real APIC.
	 */
	reg0 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
	apic_write(APIC_ID, reg0 ^ apic->apic_id_mask);
	reg1 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
	apic_write(APIC_ID, reg0);
	if (reg1 != (reg0 ^ apic->apic_id_mask))
		return 0;

	/*
	 * The next two are just to see if we have sane values.
	 * They're only really relevant if we're in Virtual Wire
	 * compatibility mode, but most boxes are anymore.
	 */
	reg0 = apic_read(APIC_LVT0);
	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
	reg1 = apic_read(APIC_LVT1);
	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);

	return 1;
}

/**
 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
 */
void __init sync_Arb_IDs(void)
{
	/*
	 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1.
	 * Also not needed on AMD.
	 */
	if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
	apic_write(APIC_ICR, APIC_DEST_ALLINC |
			APIC_INT_LEVELTRIG | APIC_DM_INIT);
}

/*
 * An initial setup of the virtual wire mode.
 */
void __init init_bsp_APIC(void)
{
	unsigned int value;

	/*
	 * Don't do the setup now if we have an SMP BIOS as the
	 * through-I/O-APIC virtual wire mode might be active.
	 */
	if (smp_found_config || !cpu_has_apic)
		return;

	/*
	 * Do not trust the local APIC being empty at bootup.
	 */
	clear_local_APIC();

	/*
	 * Enable APIC.
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;

#ifdef CONFIG_X86_32
	/* This bit is reserved on P4/Xeon and should be cleared */
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    (boot_cpu_data.x86 == 15))
		value &= ~APIC_SPIV_FOCUS_DISABLED;
	else
#endif
		value |= APIC_SPIV_FOCUS_DISABLED;
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up the virtual wire mode.
	 */
	apic_write(APIC_LVT0, APIC_DM_EXTINT);
	value = APIC_DM_NMI;
	if (!lapic_is_integrated())		/* 82489DX */
		value |= APIC_LVT_LEVEL_TRIGGER;
	apic_write(APIC_LVT1, value);
}

static void __cpuinit lapic_setup_esr(void)
{
	unsigned int oldvalue, value, maxlvt;

	if (!lapic_is_integrated()) {
		pr_info("No ESR for 82489DX.\n");
		return;
	}

	if (apic->disable_esr) {
		/*
		 * Something untraceable is creating bad interrupts on
		 * secondary quads ... for the moment, just leave the
		 * ESR disabled - we can't do anything useful with the
		 * errors anyway - mbligh
		 */
		pr_info("Leaving ESR disabled.\n");
		return;
	}

	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
		apic_write(APIC_ESR, 0);
	oldvalue = apic_read(APIC_ESR);

	/* enables sending errors */
	value = ERROR_APIC_VECTOR;
	apic_write(APIC_LVTERR, value);

	/*
	 * spec says clear errors after enabling vector.
	 */
	if (maxlvt > 3)
		apic_write(APIC_ESR, 0);
	value = apic_read(APIC_ESR);
	if (value != oldvalue)
		apic_printk(APIC_VERBOSE, "ESR value before enabling "
			"vector: 0x%08x  after: 0x%08x\n",
			oldvalue, value);
}

/**
 * setup_local_APIC - setup the local APIC
 *
 * Used to setup local APIC while initializing BSP or bringing up APs.
 * Always called with preemption disabled.
 */
void __cpuinit setup_local_APIC(void)
{
	int cpu = smp_processor_id();
	unsigned int value, queued;
	int i, j, acked = 0;
	unsigned long long tsc = 0, ntsc;
	long long max_loops = cpu_khz;

	if (cpu_has_tsc)
		rdtscll(tsc);

	if (disable_apic) {
		disable_ioapic_support();
		return;
	}

#ifdef CONFIG_X86_32
	/* Pound the ESR really hard over the head with a big hammer - mbligh */
	if (lapic_is_integrated() && apic->disable_esr) {
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
		apic_write(APIC_ESR, 0);
	}
#endif
	perf_events_lapic_init();

	/*
	 * Double-check whether this APIC is really registered.
	 * This is meaningless in clustered apic mode, so we skip it.
	 */
	BUG_ON(!apic->apic_id_registered());

	/*
	 * Intel recommends to set DFR, LDR and TPR before enabling
	 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
	 * document number 292116).  So here it goes...
	 */
	apic->init_apic_ldr();

#ifdef CONFIG_X86_32
	/*
	 * APIC LDR is initialized.  If logical_apicid mapping was
	 * initialized during get_smp_config(), make sure it matches the
	 * actual value.
	 */
	i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
	WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
	/* always use the value from LDR */
	early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
		logical_smp_processor_id();

	/*
	 * Some NUMA implementations (NUMAQ) don't initialize apicid to
	 * node mapping during NUMA init.  Now that logical apicid is
	 * guaranteed to be known, give it another chance.  This is already
	 * a bit too late - percpu allocation has already happened without
	 * proper NUMA affinity.
	 */
	if (apic->x86_32_numa_cpu_node)
		set_apicid_to_node(early_per_cpu(x86_cpu_to_apicid, cpu),
				   apic->x86_32_numa_cpu_node(cpu));
#endif

	/*
	 * Set Task Priority to 'accept all'. We never change this
	 * later on.
	 */
	value = apic_read(APIC_TASKPRI);
	value &= ~APIC_TPRI_MASK;
	apic_write(APIC_TASKPRI, value);

	/*
	 * After a crash, we no longer service the interrupts and a pending
	 * interrupt from the previous kernel might still have the ISR bit set.
	 *
	 * Most probably by now the CPU has serviced that pending interrupt and
	 * it might not have done the ack_APIC_irq() because it thought the
	 * interrupt came from the i8259 as ExtInt. The LAPIC did not get an
	 * EOI so it does not clear the ISR bit and the cpu thinks it has
	 * already serviced the interrupt. Hence a vector might get locked.
	 * It was noticed for the timer irq (vector 0x31). Issue an extra EOI
	 * to clear the ISR.
	 */
	do {
		queued = 0;
		for (i = APIC_ISR_NR - 1; i >= 0; i--)
			queued |= apic_read(APIC_IRR + i*0x10);

		for (i = APIC_ISR_NR - 1; i >= 0; i--) {
			value = apic_read(APIC_ISR + i*0x10);
			for (j = 31; j >= 0; j--) {
				if (value & (1<<j)) {
					ack_APIC_irq();
					acked++;
				}
			}
		}
		if (acked > 256) {
			printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
			       acked);
			break;
		}
		if (queued) {
			if (cpu_has_tsc) {
				rdtscll(ntsc);
				max_loops = (cpu_khz << 10) - (ntsc - tsc);
			} else
				max_loops--;
		}
	} while (queued && max_loops > 0);
	WARN_ON(max_loops <= 0);

	/*
	 * Now that we are all set up, enable the APIC
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	/*
	 * Enable APIC
	 */
	value |= APIC_SPIV_APIC_ENABLED;

#ifdef CONFIG_X86_32
	/*
	 * Some unknown Intel IO/APIC (or APIC) errata is biting us with
	 * certain networking cards. If high frequency interrupts are
	 * happening on a particular IOAPIC pin, plus the IOAPIC routing
	 * entry is masked/unmasked at a high rate as well then sooner or
	 * later IOAPIC line gets 'stuck', no more interrupts are received
	 * from the device. If focus CPU is disabled then the hang goes
	 * away, oh well :-(
	 *
	 * [ This bug can be reproduced easily with a level-triggered
	 *   PCI Ne2000 networking cards and PII/PIII processors, dual
	 *   BX chipset. ]
	 */
	/*
	 * Actually disabling the focus CPU check just makes the hang less
	 * frequent as it makes the interrupt distribution model be more
	 * like LRU than MRU (the short-term load is more even across CPUs).
	 * See also the comment in end_level_ioapic_irq(). --macro
	 */

	/*
	 * - enable focus processor (bit==0)
	 * - 64bit mode always use processor focus
	 *   so no need to set it
	 */
	value &= ~APIC_SPIV_FOCUS_DISABLED;
#endif

	/*
	 * Set spurious IRQ vector
	 */
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up LVT0, LVT1:
	 *
	 * set up through-local-APIC on the BP's LINT0. This is not
	 * strictly necessary in pure symmetric-IO mode, but sometimes
	 * we delegate interrupts to the 8259A.
	 */
	/*
	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
	 */
	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
	if (!cpu && (pic_mode || !value)) {
		value = APIC_DM_EXTINT;
		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
	} else {
		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
	}
	apic_write(APIC_LVT0, value);

	/*
	 * only the BP should see the LINT1 NMI signal, obviously.
	 */
	if (!cpu)
		value = APIC_DM_NMI;
	else
		value = APIC_DM_NMI | APIC_LVT_MASKED;
	if (!lapic_is_integrated())		/* 82489DX */
		value |= APIC_LVT_LEVEL_TRIGGER;
	apic_write(APIC_LVT1, value);

#ifdef CONFIG_X86_MCE_INTEL
	/* Recheck CMCI information after local APIC is up on CPU #0 */
	if (!cpu)
		cmci_recheck();
#endif
}

void __cpuinit end_local_APIC_setup(void)
{
	lapic_setup_esr();

#ifdef CONFIG_X86_32
	{
		unsigned int value;
		/* Disable the local apic timer */
		value = apic_read(APIC_LVTT);
		value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
		apic_write(APIC_LVTT, value);
	}
#endif

	apic_pm_activate();
}

void __init bsp_end_local_APIC_setup(void)
{
	end_local_APIC_setup();

	/*
	 * Now that local APIC setup is completed for BP, configure the fault
	 * handling for interrupt remapping.
	 */
	if (irq_remapping_enabled)
		irq_remap_enable_fault_handling();

}

06cd9a7d 1485#ifdef CONFIG_X86_X2APIC
fb209bd8
YL
1486/*
1487 * Need to disable xapic and x2apic at the same time and then enable xapic mode
1488 */
1489static inline void __disable_x2apic(u64 msr)
1490{
1491 wrmsrl(MSR_IA32_APICBASE,
1492 msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
1493 wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
1494}
1495
a31bc327 1496static __init void disable_x2apic(void)
fb209bd8
YL
1497{
1498 u64 msr;
1499
1500 if (!cpu_has_x2apic)
1501 return;
1502
1503 rdmsrl(MSR_IA32_APICBASE, msr);
1504 if (msr & X2APIC_ENABLE) {
1505 u32 x2apic_id = read_apic_id();
1506
1507 if (x2apic_id >= 255)
1508 panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
1509
1510 pr_info("Disabling x2apic\n");
1511 __disable_x2apic(msr);
1512
a31bc327
YL
1513 if (nox2apic) {
1514 clear_cpu_cap(&cpu_data(0), X86_FEATURE_X2APIC);
1515 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
1516 }
1517
fb209bd8
YL
1518 x2apic_disabled = 1;
1519 x2apic_mode = 0;
1520
1521 register_lapic_address(mp_lapic_addr);
1522 }
1523}
1524
6e1cb38a
SS
1525void check_x2apic(void)
1526{
ef1f87aa 1527 if (x2apic_enabled()) {
ba21ebb6 1528 pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
fc1edaf9 1529 x2apic_preenabled = x2apic_mode = 1;
6e1cb38a
SS
1530 }
1531}
1532
1533void enable_x2apic(void)
1534{
fb209bd8
YL
1535 u64 msr;
1536
1537 rdmsrl(MSR_IA32_APICBASE, msr);
1538 if (x2apic_disabled) {
1539 __disable_x2apic(msr);
1540 return;
1541 }
6e1cb38a 1542
fc1edaf9 1543 if (!x2apic_mode)
06cd9a7d
YL
1544 return;
1545
6e1cb38a 1546 if (!(msr & X2APIC_ENABLE)) {
450b1e8d 1547 printk_once(KERN_INFO "Enabling x2apic\n");
fb209bd8 1548 wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
6e1cb38a
SS
1549 }
1550}
93758238 1551#endif /* CONFIG_X86_X2APIC */
6e1cb38a 1552
ce69a784 1553int __init enable_IR(void)
6e1cb38a 1554{
d3f13810 1555#ifdef CONFIG_IRQ_REMAP
95a02e97 1556 if (!irq_remapping_supported()) {
93758238 1557 pr_debug("intr-remapping not supported\n");
41750d31 1558 return -1;
6e1cb38a
SS
1559 }
1560
93758238
WH
1561 if (!x2apic_preenabled && skip_ioapic_setup) {
1562 pr_info("Skipped enabling intr-remap because of skipping "
1563 "io-apic setup\n");
41750d31 1564 return -1;
6e1cb38a
SS
1565 }
1566
95a02e97 1567 return irq_remapping_enable();
ce69a784 1568#endif
41750d31 1569 return -1;
ce69a784
GN
1570}
1571
1572void __init enable_IR_x2apic(void)
1573{
1574 unsigned long flags;
ce69a784 1575 int ret, x2apic_enabled = 0;
736baef4 1576 int hardware_init_ret;
b7f42ab2 1577
736baef4 1578 /* Make sure irq_remap_ops are initialized */
95a02e97 1579 setup_irq_remapping_ops();
736baef4 1580
95a02e97 1581 hardware_init_ret = irq_remapping_prepare();
736baef4 1582 if (hardware_init_ret && !x2apic_supported())
e670761f 1583 return;
ce69a784 1584
31dce14a 1585 ret = save_ioapic_entries();
5ffa4eb2 1586 if (ret) {
ba21ebb6 1587 pr_info("Saving IO-APIC state failed: %d\n", ret);
fb209bd8 1588 return;
5ffa4eb2 1589 }
6e1cb38a 1590
05c3dc2c 1591 local_irq_save(flags);
b81bb373 1592 legacy_pic->mask_all();
31dce14a 1593 mask_ioapic_entries();
05c3dc2c 1594
a31bc327
YL
1595 if (x2apic_preenabled && nox2apic)
1596 disable_x2apic();
1597
736baef4 1598 if (hardware_init_ret)
41750d31 1599 ret = -1;
b7f42ab2
YL
1600 else
1601 ret = enable_IR();
1602
fb209bd8 1603 if (!x2apic_supported())
a31bc327 1604 goto skip_x2apic;
fb209bd8 1605
41750d31 1606 if (ret < 0) {
ce69a784
GN
1607 /* IR is required if there is APIC ID > 255 even when running
1608 * under KVM
1609 */
2904ed8d 1610 if (max_physical_apicid > 255 ||
fb209bd8
YL
1611 !hypervisor_x2apic_available()) {
1612 if (x2apic_preenabled)
1613 disable_x2apic();
a31bc327 1614 goto skip_x2apic;
fb209bd8 1615 }
ce69a784
GN
1616 /*
1617 * without IR all CPUs can be addressed by IOAPIC/MSI
1618 * only in physical mode
1619 */
1620 x2apic_force_phys();
1621 }
6e1cb38a 1622
fb209bd8
YL
1623 if (ret == IRQ_REMAP_XAPIC_MODE) {
1624 pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n");
a31bc327 1625 goto skip_x2apic;
fb209bd8 1626 }
41750d31 1627
ce69a784 1628 x2apic_enabled = 1;
93758238 1629
fc1edaf9
SS
1630 if (x2apic_supported() && !x2apic_mode) {
1631 x2apic_mode = 1;
6e1cb38a 1632 enable_x2apic();
93758238 1633 pr_info("Enabled x2apic\n");
6e1cb38a 1634 }
5ffa4eb2 1635
a31bc327 1636skip_x2apic:
41750d31 1637 if (ret < 0) /* IR enabling failed */
31dce14a 1638 restore_ioapic_entries();
b81bb373 1639 legacy_pic->restore_mask();
6e1cb38a 1640 local_irq_restore(flags);
6e1cb38a 1641}
93758238 1642
be7a656f 1643#ifdef CONFIG_X86_64
1da177e4
LT
1644/*
1645 * Detect and enable local APICs on non-SMP boards.
1646 * Original code written by Keir Fraser.
1647 * On AMD64 we trust the BIOS - if it says no APIC it is likely
6935d1f9 1648 * not correctly set up (usually the APIC timer won't work etc.)
1da177e4 1649 */
0e078e2f 1650static int __init detect_init_APIC(void)
1da177e4
LT
1651{
1652 if (!cpu_has_apic) {
ba21ebb6 1653 pr_info("No local APIC present\n");
1da177e4
LT
1654 return -1;
1655 }
1656
1657 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1da177e4
LT
1658 return 0;
1659}
be7a656f 1660#else
5a7ae78f 1661
25874a29 1662static int __init apic_verify(void)
5a7ae78f
TG
1663{
1664 u32 features, h, l;
1665
1666 /*
1667 * The APIC feature bit should now be enabled
1668 * in `cpuid'
1669 */
1670 features = cpuid_edx(1);
1671 if (!(features & (1 << X86_FEATURE_APIC))) {
1672 pr_warning("Could not enable APIC!\n");
1673 return -1;
1674 }
1675 set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1676 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1677
1678 /* The BIOS may have set up the APIC at some other address */
cbf2829b
BD
1679 if (boot_cpu_data.x86 >= 6) {
1680 rdmsr(MSR_IA32_APICBASE, l, h);
1681 if (l & MSR_IA32_APICBASE_ENABLE)
1682 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1683 }
5a7ae78f
TG
1684
1685 pr_info("Found and enabled local APIC!\n");
1686 return 0;
1687}
1688
25874a29 1689int __init apic_force_enable(unsigned long addr)
5a7ae78f
TG
1690{
1691 u32 h, l;
1692
1693 if (disable_apic)
1694 return -1;
1695
1696 /*
1697 * Some BIOSes disable the local APIC in the APIC_BASE
1698 * MSR. This can only be done in software for Intel P6 or later
1699 * and AMD K7 (Model > 1) or later.
1700 */
cbf2829b
BD
1701 if (boot_cpu_data.x86 >= 6) {
1702 rdmsr(MSR_IA32_APICBASE, l, h);
1703 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
1704 pr_info("Local APIC disabled by BIOS -- reenabling.\n");
1705 l &= ~MSR_IA32_APICBASE_BASE;
1706 l |= MSR_IA32_APICBASE_ENABLE | addr;
1707 wrmsr(MSR_IA32_APICBASE, l, h);
1708 enabled_via_apicbase = 1;
1709 }
5a7ae78f
TG
1710 }
1711 return apic_verify();
1712}
1713
be7a656f
YL
1714/*
1715 * Detect and initialize APIC
1716 */
1717static int __init detect_init_APIC(void)
1718{
be7a656f
YL
1719 /* Disabled by kernel option? */
1720 if (disable_apic)
1721 return -1;
1722
1723 switch (boot_cpu_data.x86_vendor) {
1724 case X86_VENDOR_AMD:
1725 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
85877061 1726 (boot_cpu_data.x86 >= 15))
be7a656f
YL
1727 break;
1728 goto no_apic;
1729 case X86_VENDOR_INTEL:
1730 if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
1731 (boot_cpu_data.x86 == 5 && cpu_has_apic))
1732 break;
1733 goto no_apic;
1734 default:
1735 goto no_apic;
1736 }
1737
1738 if (!cpu_has_apic) {
1739 /*
1740 * Over-ride BIOS and try to enable the local APIC only if
1741 * "lapic" specified.
1742 */
1743 if (!force_enable_local_apic) {
ba21ebb6
CG
1744 pr_info("Local APIC disabled by BIOS -- "
1745 "you can enable it with \"lapic\"\n");
be7a656f
YL
1746 return -1;
1747 }
a906fdaa 1748 if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
5a7ae78f
TG
1749 return -1;
1750 } else {
1751 if (apic_verify())
1752 return -1;
be7a656f 1753 }
be7a656f
YL
1754
1755 apic_pm_activate();
1756
1757 return 0;
1758
1759no_apic:
ba21ebb6 1760 pr_info("No local APIC present or hardware disabled\n");
be7a656f
YL
1761 return -1;
1762}
1763#endif
1da177e4 1764
0e078e2f
TG
1765/**
1766 * init_apic_mappings - initialize APIC mappings
1767 */
1da177e4
LT
1768void __init init_apic_mappings(void)
1769{
4401da61
YL
1770 unsigned int new_apicid;
1771
fc1edaf9 1772 if (x2apic_mode) {
4c9961d5 1773 boot_cpu_physical_apicid = read_apic_id();
6e1cb38a
SS
1774 return;
1775 }
1776
4797f6b0 1777 /* If no local APIC can be found return early */
1da177e4 1778 if (!smp_found_config && detect_init_APIC()) {
4797f6b0
YL
1779 /* lets NOP'ify apic operations */
1780 pr_info("APIC: disable apic facility\n");
1781 apic_disable();
1782 } else {
1da177e4
LT
1783 apic_phys = mp_lapic_addr;
1784
4797f6b0
YL
1785 /*
1786 * acpi lapic path already maps that address in
1787 * acpi_register_lapic_address()
1788 */
5989cd6a 1789 if (!acpi_lapic && !smp_found_config)
326a2e6b 1790 register_lapic_address(apic_phys);
cec6be6d 1791 }
1da177e4
LT
1792
1793 /*
1794 * Fetch the APIC ID of the BSP in case we have a
1795 * default configuration (or the MP table is broken).
1796 */
4401da61
YL
1797 new_apicid = read_apic_id();
1798 if (boot_cpu_physical_apicid != new_apicid) {
1799 boot_cpu_physical_apicid = new_apicid;
103428e5
CG
1800 /*
1801 * yeah -- we lie about apic_version
1802 * in case if apic was disabled via boot option
1803 * but it's not a problem for SMP compiled kernel
1804 * since smp_sanity_check is prepared for such a case
1805 * and disable smp mode
1806 */
4401da61
YL
1807 apic_version[new_apicid] =
1808 GET_APIC_VERSION(apic_read(APIC_LVR));
08306ce6 1809 }
1da177e4
LT
1810}
1811
c0104d38
YL
1812void __init register_lapic_address(unsigned long address)
1813{
1814 mp_lapic_addr = address;
1815
0450193b
YL
1816 if (!x2apic_mode) {
1817 set_fixmap_nocache(FIX_APIC_BASE, address);
1818 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
1819 APIC_BASE, mp_lapic_addr);
1820 }
c0104d38
YL
1821 if (boot_cpu_physical_apicid == -1U) {
1822 boot_cpu_physical_apicid = read_apic_id();
1823 apic_version[boot_cpu_physical_apicid] =
1824 GET_APIC_VERSION(apic_read(APIC_LVR));
1825 }
1826}
1827
1da177e4 1828/*
0e078e2f
TG
1829 * This initializes the IO-APIC and APIC hardware if this is
1830 * a UP kernel.
1da177e4 1831 */
56d91f13 1832int apic_version[MAX_LOCAL_APIC];
1b313f4a 1833
0e078e2f 1834int __init APIC_init_uniprocessor(void)
1da177e4 1835{
0e078e2f 1836 if (disable_apic) {
ba21ebb6 1837 pr_info("Apic disabled\n");
0e078e2f
TG
1838 return -1;
1839 }
f1182638 1840#ifdef CONFIG_X86_64
0e078e2f
TG
1841 if (!cpu_has_apic) {
1842 disable_apic = 1;
ba21ebb6 1843 pr_info("Apic disabled by BIOS\n");
0e078e2f
TG
1844 return -1;
1845 }
fa2bd35a
YL
1846#else
1847 if (!smp_found_config && !cpu_has_apic)
1848 return -1;
1849
1850 /*
1851 * Complain if the BIOS pretends there is one.
1852 */
1853 if (!cpu_has_apic &&
1854 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
ba21ebb6
CG
1855 pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
1856 boot_cpu_physical_apicid);
fa2bd35a
YL
1857 return -1;
1858 }
1859#endif
1860
72ce0165 1861 default_setup_apic_routing();
6e1cb38a 1862
0e078e2f 1863 verify_local_APIC();
b5841765
GC
1864 connect_bsp_APIC();
1865
fa2bd35a 1866#ifdef CONFIG_X86_64
c70dcb74 1867 apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
fa2bd35a
YL
1868#else
1869 /*
1870 * Hack: in case of kdump, after a crash, the kernel might be booting
1871 * on a CPU with a non-zero local APIC ID. But boot_cpu_physical_apicid
1872 * might be zero if it was read from the MP tables. Get it from the LAPIC.
1873 */
1874# ifdef CONFIG_CRASH_DUMP
1875 boot_cpu_physical_apicid = read_apic_id();
1876# endif
1877#endif
1878 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
0e078e2f 1879 setup_local_APIC();
1da177e4 1880
88d0f550 1881#ifdef CONFIG_X86_IO_APIC
739f33b3
AK
1882 /*
1883 * Now enable the IO-APICs; this actually calls clear_IO_APIC().
98c061b6 1884 * We need clear_IO_APIC() before enabling the error vector.
739f33b3
AK
1885 */
1886 if (!skip_ioapic_setup && nr_ioapics)
1887 enable_IO_APIC();
fa2bd35a 1888#endif
739f33b3 1889
2fb270f3 1890 bsp_end_local_APIC_setup();
739f33b3 1891
fa2bd35a 1892#ifdef CONFIG_X86_IO_APIC
0e078e2f
TG
1893 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1894 setup_IO_APIC();
98c061b6 1895 else {
0e078e2f 1896 nr_ioapics = 0;
98c061b6 1897 }
fa2bd35a
YL
1898#endif
1899
736decac 1900 x86_init.timers.setup_percpu_clockev();
0e078e2f 1901 return 0;
1da177e4
LT
1902}
1903
1904/*
0e078e2f 1905 * Local APIC interrupts
1da177e4
LT
1906 */
1907
0e078e2f
TG
1908/*
1909 * This interrupt should _never_ happen with our APIC/SMP architecture
1910 */
dc1528dd 1911void smp_spurious_interrupt(struct pt_regs *regs)
1da177e4 1912{
dc1528dd
YL
1913 u32 v;
1914
0e078e2f 1915 irq_enter();
98ad1cc1 1916 exit_idle();
1da177e4 1917 /*
0e078e2f
TG
1918 * Check if this really is a spurious interrupt and ACK it
1919 * if it is a vectored one. Just in case...
1920 * Spurious interrupts should not be ACKed.
1da177e4 1921 */
0e078e2f
TG
1922 v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
1923 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1924 ack_APIC_irq();
c4d58cbd 1925
915b0d01
HS
1926 inc_irq_stat(irq_spurious_count);
1927
dc1528dd 1928 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
ba21ebb6
CG
1929 pr_info("spurious APIC interrupt on CPU#%d, "
1930 "should never happen.\n", smp_processor_id());
0e078e2f
TG
1931 irq_exit();
1932}
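/*
 * Illustrative sketch of the ISR indexing used above: the 256 vectors
 * are tracked in eight 32-bit in-service registers spaced 0x10 apart,
 * so (vec & ~0x1f) >> 1 is just (vec / 32) * 0x10, and the bit within
 * that register is vec % 32. The helper name is made up.
 */
static bool vector_in_service(unsigned int vec)
{
	u32 isr = apic_read(APIC_ISR + ((vec & ~0x1f) >> 1));

	return isr & (1u << (vec & 0x1f));
}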
1da177e4 1933
0e078e2f
TG
1934/*
1935 * This interrupt should never happen with our APIC/SMP architecture
1936 */
dc1528dd 1937void smp_error_interrupt(struct pt_regs *regs)
0e078e2f 1938{
2b398bd9
YS
1939 u32 v0, v1;
1940 u32 i = 0;
1941 static const char * const error_interrupt_reason[] = {
1942 "Send CS error", /* APIC Error Bit 0 */
1943 "Receive CS error", /* APIC Error Bit 1 */
1944 "Send accept error", /* APIC Error Bit 2 */
1945 "Receive accept error", /* APIC Error Bit 3 */
1946 "Redirectable IPI", /* APIC Error Bit 4 */
1947 "Send illegal vector", /* APIC Error Bit 5 */
1948 "Received illegal vector", /* APIC Error Bit 6 */
1949 "Illegal register address", /* APIC Error Bit 7 */
1950 };
1da177e4 1951
0e078e2f 1952 irq_enter();
98ad1cc1 1953 exit_idle();
0e078e2f 1954 /* First tickle the hardware, only then report what went on. -- REW */
2b398bd9 1955 v0 = apic_read(APIC_ESR);
0e078e2f
TG
1956 apic_write(APIC_ESR, 0);
1957 v1 = apic_read(APIC_ESR);
1958 ack_APIC_irq();
1959 atomic_inc(&irq_err_count);
ba7eda4c 1960
2b398bd9
YS
1961 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
1962 smp_processor_id(), v0, v1);
1963
1964 v1 = v1 & 0xff;
1965 while (v1) {
1966 if (v1 & 0x1)
1967 apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
1968 i++;
1969 v1 >>= 1;
4b8073e4 1970 }
2b398bd9
YS
1971
1972 apic_printk(APIC_DEBUG, KERN_CONT "\n");
1973
0e078e2f 1974 irq_exit();
1da177e4
LT
1975}
1976
b5841765 1977/**
36c9d674
CG
1978 * connect_bsp_APIC - attach the APIC to the interrupt system
1979 */
b5841765
GC
1980void __init connect_bsp_APIC(void)
1981{
36c9d674
CG
1982#ifdef CONFIG_X86_32
1983 if (pic_mode) {
1984 /*
1985 * Do not trust the local APIC to be empty at bootup.
1986 */
1987 clear_local_APIC();
1988 /*
1989 * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
1990 * local APIC to INT and NMI lines.
1991 */
1992 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
1993 "enabling APIC mode.\n");
c0eaa453 1994 imcr_pic_to_apic();
36c9d674
CG
1995 }
1996#endif
49040333
IM
1997 if (apic->enable_apic_mode)
1998 apic->enable_apic_mode();
b5841765
GC
1999}
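/*
 * Sketch of what the imcr_pic_to_apic() call above amounts to on boards
 * that implement the IMCR (per the MP specification): writing 0x70 to
 * port 0x22 selects the IMCR, then port 0x23 takes the mode -- 0x01
 * routes NMI/INTR through the local APIC, 0x00 back to the master PIC.
 * The function name is illustrative only.
 */
static void __init imcr_route_via_apic_sketch(void)
{
	outb(0x70, 0x22);	/* select IMCR */
	outb(0x01, 0x23);	/* NMI and 8259 INTR go through the APIC */
}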
2000
274cfe59
CG
2001/**
2002 * disconnect_bsp_APIC - detach the APIC from the interrupt system
2003 * @virt_wire_setup: indicates whether virtual wire mode is selected
2004 *
2005 * Virtual wire mode is necessary to deliver legacy interrupts even when the
2006 * APIC is disabled.
2007 */
0e078e2f 2008void disconnect_bsp_APIC(int virt_wire_setup)
1da177e4 2009{
1b4ee4e4
CG
2010 unsigned int value;
2011
c177b0bc
CG
2012#ifdef CONFIG_X86_32
2013 if (pic_mode) {
2014 /*
2015 * Put the board back into PIC mode (has an effect only on
2016 * certain older boards). Note that APIC interrupts, including
2017 * IPIs, won't work beyond this point! The only exception are
2018 * INIT IPIs.
2019 */
2020 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
2021 "entering PIC mode.\n");
c0eaa453 2022 imcr_apic_to_pic();
c177b0bc
CG
2023 return;
2024 }
2025#endif
2026
0e078e2f 2027 /* Go back to Virtual Wire compatibility mode */
1da177e4 2028
0e078e2f
TG
2029 /* For the spurious interrupt use vector F, and enable it */
2030 value = apic_read(APIC_SPIV);
2031 value &= ~APIC_VECTOR_MASK;
2032 value |= APIC_SPIV_APIC_ENABLED;
2033 value |= 0xf;
2034 apic_write(APIC_SPIV, value);
b8ce3359 2035
0e078e2f
TG
2036 if (!virt_wire_setup) {
2037 /*
2038 * For LVT0 make it edge triggered, active high,
2039 * external and enabled
2040 */
2041 value = apic_read(APIC_LVT0);
2042 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2043 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2044 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2045 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2046 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
2047 apic_write(APIC_LVT0, value);
2048 } else {
2049 /* Disable LVT0 */
2050 apic_write(APIC_LVT0, APIC_LVT_MASKED);
2051 }
b8ce3359 2052
c177b0bc
CG
2053 /*
2054 * For LVT1 make it edge triggered, active high,
2055 * nmi and enabled
2056 */
0e078e2f
TG
2057 value = apic_read(APIC_LVT1);
2058 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2059 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2060 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2061 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2062 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
2063 apic_write(APIC_LVT1, value);
1da177e4
LT
2064}
2065
be8a5685
AS
2066void __cpuinit generic_processor_info(int apicid, int version)
2067{
14cb6dcf
VG
2068 int cpu, max = nr_cpu_ids;
2069 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
2070 phys_cpu_present_map);
2071
2072 /*
2073 * If the boot CPU has not been detected yet, then only allow up to
2074 * nr_cpu_ids - 1 processors and keep one slot free for the boot CPU.
2075 */
2076 if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
2077 apicid != boot_cpu_physical_apicid) {
2078 int thiscpu = max + disabled_cpus - 1;
2079
2080 pr_warning(
2081 "ACPI: NR_CPUS/possible_cpus limit of %i almost"
2082 " reached. Keeping one slot for boot cpu."
2083 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2084
2085 disabled_cpus++;
2086 return;
2087 }
be8a5685 2088
3b11ce7f 2089 if (num_processors >= nr_cpu_ids) {
3b11ce7f
MT
2090 int thiscpu = max + disabled_cpus;
2091
2092 pr_warning(
2093 "ACPI: NR_CPUS/possible_cpus limit of %i reached."
2094 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2095
2096 disabled_cpus++;
be8a5685
AS
2097 return;
2098 }
2099
2100 num_processors++;
be8a5685
AS
2101 if (apicid == boot_cpu_physical_apicid) {
2102 /*
2103 * x86_bios_cpu_apicid is required to have processors listed
2104 * in the same order as logical CPU numbers. Hence the first
2105 * entry is the BSP, and so on.
e5fea868
YL
2106 * boot_cpu_init() already holds bit 0 in cpu_present_mask
2107 * for the BSP.
be8a5685
AS
2108 */
2109 cpu = 0;
e5fea868
YL
2110 } else
2111 cpu = cpumask_next_zero(-1, cpu_present_mask);
2112
2113 /*
2114 * Validate version
2115 */
2116 if (version == 0x0) {
2117 pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
2118 cpu, apicid);
2119 version = 0x10;
be8a5685 2120 }
e5fea868
YL
2121 apic_version[apicid] = version;
2122
2123 if (version != apic_version[boot_cpu_physical_apicid]) {
2124 pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
2125 apic_version[boot_cpu_physical_apicid], cpu, version);
2126 }
2127
2128 physid_set(apicid, phys_cpu_present_map);
e0da3364
YL
2129 if (apicid > max_physical_apicid)
2130 max_physical_apicid = apicid;
2131
3e5095d1 2132#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
f10fcd47
TH
2133 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
2134 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
1b313f4a 2135#endif
acb8bc09
TH
2136#ifdef CONFIG_X86_32
2137 early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
2138 apic->x86_32_early_logical_apicid(cpu);
2139#endif
1de88cd4
MT
2140 set_cpu_possible(cpu, true);
2141 set_cpu_present(cpu, true);
be8a5685
AS
2142}
2143
0c81c746
SS
2144int hard_smp_processor_id(void)
2145{
2146 return read_apic_id();
2147}
1dcdd3d1
IM
2148
2149void default_init_apic_ldr(void)
2150{
2151 unsigned long val;
2152
2153 apic_write(APIC_DFR, APIC_DFR_VALUE);
2154 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
2155 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
2156 apic_write(APIC_LDR, val);
2157}
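/*
 * Illustrative sketch (helper name made up): with the flat logical
 * setup above, each CPU owns one bit of the 8-bit logical APIC ID, so
 * a logical destination mask for a set of CPUs is just the OR of their
 * bits -- which is also why flat logical mode tops out at 8 CPUs.
 */
static u32 flat_logical_dest_sketch(const struct cpumask *mask)
{
	unsigned int cpu;
	u32 dest = 0;

	for_each_cpu(cpu, mask)
		dest |= 1u << cpu;

	return dest & 0xFFu;
}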
2158
ff164324
AG
2159int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
2160 const struct cpumask *andmask,
2161 unsigned int *apicid)
6398268d 2162{
ea3807ea 2163 unsigned int cpu;
6398268d
AG
2164
2165 for_each_cpu_and(cpu, cpumask, andmask) {
2166 if (cpumask_test_cpu(cpu, cpu_online_mask))
2167 break;
2168 }
ff164324 2169
ea3807ea 2170 if (likely(cpu < nr_cpu_ids)) {
a5a39156
AG
2171 *apicid = per_cpu(x86_cpu_to_apicid, cpu);
2172 return 0;
a5a39156 2173 }
ea3807ea
AG
2174
2175 return -EINVAL;
6398268d
AG
2176}
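/*
 * Usage sketch (names are illustrative, not from this file): pick a
 * destination APIC ID for an interrupt that may target any online CPU
 * present in both masks; a negative return means the intersection of
 * the two masks contains no online CPU.
 */
static int pick_irq_dest_sketch(const struct cpumask *requested,
				const struct cpumask *domain)
{
	unsigned int apicid;
	int err;

	err = default_cpu_mask_to_apicid_and(requested, domain, &apicid);
	return err ? err : (int)apicid;
}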
2177
1551df64
MT
2178/*
2179 * Override the generic EOI implementation with an optimized version.
2180 * Only called during early boot when only one CPU is active and with
2181 * interrupts disabled, so we know this does not race with actual APIC driver
2182 * use.
2183 */
2184void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
2185{
2186 struct apic **drv;
2187
2188 for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
2189 /* Should happen once for each apic */
2190 WARN_ON((*drv)->eoi_write == eoi_write);
2191 (*drv)->eoi_write = eoi_write;
2192 }
2193}
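/*
 * Sketch of the kind of writer that typically gets plugged in here
 * (for example the x2APIC MSR path): in x2APIC mode the EOI register
 * lives at MSR APIC_BASE_MSR + (APIC_EOI >> 4) and an EOI is a plain
 * write of zero, so both arguments of the callback can be ignored.
 * The function name is illustrative only.
 */
static void x2apic_eoi_write_sketch(u32 reg, u32 v)
{
	wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
}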
2194
89039b37 2195/*
0e078e2f 2196 * Power management
89039b37 2197 */
0e078e2f
TG
2198#ifdef CONFIG_PM
2199
2200static struct {
274cfe59
CG
2201 /*
2202 * 'active' is true if the local APIC was enabled by us and
2203 * not the BIOS; this signifies that we are also responsible
2204 * for disabling it before entering apm/acpi suspend
2205 */
0e078e2f
TG
2206 int active;
2207 /* r/w apic fields */
2208 unsigned int apic_id;
2209 unsigned int apic_taskpri;
2210 unsigned int apic_ldr;
2211 unsigned int apic_dfr;
2212 unsigned int apic_spiv;
2213 unsigned int apic_lvtt;
2214 unsigned int apic_lvtpc;
2215 unsigned int apic_lvt0;
2216 unsigned int apic_lvt1;
2217 unsigned int apic_lvterr;
2218 unsigned int apic_tmict;
2219 unsigned int apic_tdcr;
2220 unsigned int apic_thmr;
2221} apic_pm_state;
2222
f3c6ea1b 2223static int lapic_suspend(void)
0e078e2f
TG
2224{
2225 unsigned long flags;
2226 int maxlvt;
89039b37 2227
0e078e2f
TG
2228 if (!apic_pm_state.active)
2229 return 0;
89039b37 2230
0e078e2f 2231 maxlvt = lapic_get_maxlvt();
89039b37 2232
2d7a66d0 2233 apic_pm_state.apic_id = apic_read(APIC_ID);
0e078e2f
TG
2234 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
2235 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
2236 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
2237 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
2238 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
2239 if (maxlvt >= 4)
2240 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
2241 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
2242 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
2243 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
2244 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
2245 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
4efc0670 2246#ifdef CONFIG_X86_THERMAL_VECTOR
0e078e2f
TG
2247 if (maxlvt >= 5)
2248 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
2249#endif
24968cfd 2250
0e078e2f
TG
2251 local_irq_save(flags);
2252 disable_local_APIC();
fc1edaf9 2253
95a02e97
SS
2254 if (irq_remapping_enabled)
2255 irq_remapping_disable();
fc1edaf9 2256
0e078e2f
TG
2257 local_irq_restore(flags);
2258 return 0;
1da177e4
LT
2259}
2260
f3c6ea1b 2261static void lapic_resume(void)
1da177e4 2262{
0e078e2f
TG
2263 unsigned int l, h;
2264 unsigned long flags;
31dce14a 2265 int maxlvt;
b24696bc 2266
0e078e2f 2267 if (!apic_pm_state.active)
f3c6ea1b 2268 return;
89b831ef 2269
0e078e2f 2270 local_irq_save(flags);
95a02e97 2271 if (irq_remapping_enabled) {
31dce14a
SS
2272 /*
2273 * IO-APIC and PIC have their own resume routines.
2274 * We just mask them here to make sure the interrupt
2275 * subsystem is completely quiet while we enable x2apic
2276 * and interrupt-remapping.
2277 */
2278 mask_ioapic_entries();
b81bb373 2279 legacy_pic->mask_all();
b24696bc 2280 }
92206c90 2281
fc1edaf9 2282 if (x2apic_mode)
92206c90 2283 enable_x2apic();
cf6567fe 2284 else {
92206c90
CG
2285 /*
2286 * Make sure the APICBASE points to the right address
2287 *
2288 * FIXME! This will be wrong if we ever support suspend on
2289 * SMP! We'll need to do this as part of the CPU restore!
2290 */
cbf2829b
BD
2291 if (boot_cpu_data.x86 >= 6) {
2292 rdmsr(MSR_IA32_APICBASE, l, h);
2293 l &= ~MSR_IA32_APICBASE_BASE;
2294 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
2295 wrmsr(MSR_IA32_APICBASE, l, h);
2296 }
d5e629a6 2297 }
6e1cb38a 2298
b24696bc 2299 maxlvt = lapic_get_maxlvt();
0e078e2f
TG
2300 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
2301 apic_write(APIC_ID, apic_pm_state.apic_id);
2302 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
2303 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
2304 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
2305 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
2306 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
2307 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
92206c90 2308#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
0e078e2f
TG
2309 if (maxlvt >= 5)
2310 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
2311#endif
2312 if (maxlvt >= 4)
2313 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
2314 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
2315 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
2316 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
2317 apic_write(APIC_ESR, 0);
2318 apic_read(APIC_ESR);
2319 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
2320 apic_write(APIC_ESR, 0);
2321 apic_read(APIC_ESR);
92206c90 2322
95a02e97
SS
2323 if (irq_remapping_enabled)
2324 irq_remapping_reenable(x2apic_mode);
31dce14a 2325
0e078e2f 2326 local_irq_restore(flags);
0e078e2f 2327}
b8ce3359 2328
274cfe59
CG
2329/*
2330 * This device has no shutdown method - fully functioning local APICs
2331 * are needed on every CPU up until machine_halt/restart/poweroff.
2332 */
2333
f3c6ea1b 2334static struct syscore_ops lapic_syscore_ops = {
0e078e2f
TG
2335 .resume = lapic_resume,
2336 .suspend = lapic_suspend,
2337};
b8ce3359 2338
0e078e2f
TG
2339static void __cpuinit apic_pm_activate(void)
2340{
2341 apic_pm_state.active = 1;
1da177e4
LT
2342}
2343
0e078e2f 2344static int __init init_lapic_sysfs(void)
1da177e4 2345{
0e078e2f 2346 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
f3c6ea1b
RW
2347 if (cpu_has_apic)
2348 register_syscore_ops(&lapic_syscore_ops);
e83a5fdc 2349
f3c6ea1b 2350 return 0;
1da177e4 2351}
b24696bc
FY
2352
2353/* local apic needs to resume before other devices access its registers. */
2354core_initcall(init_lapic_sysfs);
0e078e2f
TG
2355
2356#else /* CONFIG_PM */
2357
2358static void apic_pm_activate(void) { }
2359
2360#endif /* CONFIG_PM */
1da177e4 2361
f28c0ae2 2362#ifdef CONFIG_X86_64
e0e42142
YL
2363
2364static int __cpuinit apic_cluster_num(void)
1da177e4
LT
2365{
2366 int i, clusters, zeros;
2367 unsigned id;
322850af 2368 u16 *bios_cpu_apicid;
1da177e4
LT
2369 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
2370
23ca4bba 2371 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
376ec33f 2372 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
1da177e4 2373
168ef543 2374 for (i = 0; i < nr_cpu_ids; i++) {
e8c10ef9 2375 /* are we being called early in kernel startup? */
693e3c56
MT
2376 if (bios_cpu_apicid) {
2377 id = bios_cpu_apicid[i];
e423e33e 2378 } else if (i < nr_cpu_ids) {
e8c10ef9 2379 if (cpu_present(i))
2380 id = per_cpu(x86_bios_cpu_apicid, i);
2381 else
2382 continue;
e423e33e 2383 } else
e8c10ef9 2384 break;
2385
1da177e4
LT
2386 if (id != BAD_APICID)
2387 __set_bit(APIC_CLUSTERID(id), clustermap);
2388 }
2389
2390 /* Problem: Partially populated chassis may not have CPUs in some of
2391 * the APIC clusters they have been allocated. Only present CPUs have
602a54a8 2392 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
2393 * Since clusters are allocated sequentially, count zeros only if
2394 * they are bounded by ones.
1da177e4
LT
2395 */
2396 clusters = 0;
2397 zeros = 0;
2398 for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
2399 if (test_bit(i, clustermap)) {
2400 clusters += 1 + zeros;
2401 zeros = 0;
2402 } else
2403 ++zeros;
2404 }
2405
e0e42142
YL
2406 return clusters;
2407}
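/*
 * Worked example of the counting rule above (illustrative only): if
 * clusters 0, 1 and 3 are populated, the hole at cluster 2 is bounded
 * by ones, so the loop yields 1 + 1 + (1 + 1) = 4 clusters rather than
 * 3 -- the partially populated chassis still counts.
 */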
2408
2409static int __cpuinitdata multi_checked;
2410static int __cpuinitdata multi;
2411
2412static int __cpuinit set_multi(const struct dmi_system_id *d)
2413{
2414 if (multi)
2415 return 0;
6f0aced6 2416 pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
e0e42142
YL
2417 multi = 1;
2418 return 0;
2419}
2420
2421static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
2422 {
2423 .callback = set_multi,
2424 .ident = "IBM System Summit2",
2425 .matches = {
2426 DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
2427 DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
2428 },
2429 },
2430 {}
2431};
2432
2433static void __cpuinit dmi_check_multi(void)
2434{
2435 if (multi_checked)
2436 return;
2437
2438 dmi_check_system(multi_dmi_table);
2439 multi_checked = 1;
2440}
2441
2442/*
2443 * apic_is_clustered_box() -- Check if we can expect good TSC
2444 *
2445 * Thus far, the major user of this is IBM's Summit2 series:
2446 * Clustered boxes may have unsynced TSC problems if they are
2447 * multi-chassis.
2448 * Use DMI to check for them.
2449 */
2450__cpuinit int apic_is_clustered_box(void)
2451{
2452 dmi_check_multi();
2453 if (multi)
1cb68487
RT
2454 return 1;
2455
e0e42142
YL
2456 if (!is_vsmp_box())
2457 return 0;
2458
1da177e4 2459 /*
e0e42142
YL
2460 * ScaleMP vSMPowered boxes have one cluster per board and TSCs are
2461 * not guaranteed to be synced between boards
1da177e4 2462 */
e0e42142
YL
2463 if (apic_cluster_num() > 1)
2464 return 1;
2465
2466 return 0;
1da177e4 2467}
f28c0ae2 2468#endif
1da177e4
LT
2469
2470/*
0e078e2f 2471 * APIC command line parameters
1da177e4 2472 */
789fa735 2473static int __init setup_disableapic(char *arg)
6935d1f9 2474{
1da177e4 2475 disable_apic = 1;
9175fc06 2476 setup_clear_cpu_cap(X86_FEATURE_APIC);
2c8c0e6b
AK
2477 return 0;
2478}
2479early_param("disableapic", setup_disableapic);
1da177e4 2480
2c8c0e6b 2481/* same as disableapic, for compatibility */
789fa735 2482static int __init setup_nolapic(char *arg)
6935d1f9 2483{
789fa735 2484 return setup_disableapic(arg);
6935d1f9 2485}
2c8c0e6b 2486early_param("nolapic", setup_nolapic);
1da177e4 2487
2e7c2838
LT
2488static int __init parse_lapic_timer_c2_ok(char *arg)
2489{
2490 local_apic_timer_c2_ok = 1;
2491 return 0;
2492}
2493early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
2494
36fef094 2495static int __init parse_disable_apic_timer(char *arg)
6935d1f9 2496{
1da177e4 2497 disable_apic_timer = 1;
36fef094 2498 return 0;
6935d1f9 2499}
36fef094
CG
2500early_param("noapictimer", parse_disable_apic_timer);
2501
2502static int __init parse_nolapic_timer(char *arg)
2503{
2504 disable_apic_timer = 1;
2505 return 0;
6935d1f9 2506}
36fef094 2507early_param("nolapic_timer", parse_nolapic_timer);
73dea47f 2508
79af9bec
CG
2509static int __init apic_set_verbosity(char *arg)
2510{
2511 if (!arg) {
2512#ifdef CONFIG_X86_64
2513 skip_ioapic_setup = 0;
79af9bec
CG
2514 return 0;
2515#endif
2516 return -EINVAL;
2517 }
2518
2519 if (strcmp("debug", arg) == 0)
2520 apic_verbosity = APIC_DEBUG;
2521 else if (strcmp("verbose", arg) == 0)
2522 apic_verbosity = APIC_VERBOSE;
2523 else {
ba21ebb6 2524 pr_warning("APIC Verbosity level %s not recognised,"
79af9bec
CG
2525 " use apic=verbose or apic=debug\n", arg);
2526 return -EINVAL;
2527 }
2528
2529 return 0;
2530}
2531early_param("apic", apic_set_verbosity);
2532
1e934dda
YL
2533static int __init lapic_insert_resource(void)
2534{
2535 if (!apic_phys)
2536 return -1;
2537
2538 /* Put local APIC into the resource map. */
2539 lapic_resource.start = apic_phys;
2540 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
2541 insert_resource(&iomem_resource, &lapic_resource);
2542
2543 return 0;
2544}
2545
2546/*
2547 * We need to call insert_resource() after e820_reserve_resources(),
2548 * which uses request_resource().
2549 */
2550late_initcall(lapic_insert_resource);