x86_64: Add (not yet used) clock event functions
arch/x86/kernel/apic_64.c
1 /*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17 #include <linux/init.h>
18
19 #include <linux/mm.h>
20 #include <linux/delay.h>
21 #include <linux/bootmem.h>
22 #include <linux/interrupt.h>
23 #include <linux/mc146818rtc.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/sysdev.h>
26 #include <linux/module.h>
27 #include <linux/ioport.h>
28 #include <linux/clockchips.h>
29
30 #include <asm/atomic.h>
31 #include <asm/smp.h>
32 #include <asm/mtrr.h>
33 #include <asm/mpspec.h>
34 #include <asm/pgalloc.h>
35 #include <asm/mach_apic.h>
36 #include <asm/nmi.h>
37 #include <asm/idle.h>
38 #include <asm/proto.h>
39 #include <asm/timex.h>
40 #include <asm/hpet.h>
41 #include <asm/apic.h>
42
43 int apic_verbosity;
44 int apic_runs_main_timer;
45 int apic_calibrate_pmtmr __initdata;
46
47 int disable_apic_timer __initdata;
48
49 /* Local APIC timer works in C2? */
50 int local_apic_timer_c2_ok;
51 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
52
53 static struct resource *ioapic_resources;
54 static struct resource lapic_resource = {
55 .name = "Local APIC",
56 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
57 };
58
59 static unsigned int calibration_result;
60
61 static int lapic_next_event(unsigned long delta,
62 struct clock_event_device *evt);
63 static void lapic_timer_setup(enum clock_event_mode mode,
64 struct clock_event_device *evt);
65
66 static void lapic_timer_broadcast(cpumask_t mask);
67
68 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen);
69
70 static struct clock_event_device lapic_clockevent = {
71 .name = "lapic",
72 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
73 | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
74 .shift = 32,
75 .set_mode = lapic_timer_setup,
76 .set_next_event = lapic_next_event,
77 .broadcast = lapic_timer_broadcast,
78 .rating = 100,
79 .irq = -1,
80 };
81 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
82
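/*
 * Reprogram the initial count register; the already configured timer
 * fires LOCAL_TIMER_VECTOR after 'delta' timer ticks.
 */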
83 static int lapic_next_event(unsigned long delta,
84 struct clock_event_device *evt)
85 {
86 apic_write(APIC_TMICT, delta);
87 return 0;
88 }
89
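/*
 * Switch the local APIC timer between periodic, one-shot and shutdown
 * modes on behalf of the clockevents core. Nothing is touched while the
 * device is still acting as a broadcast dummy.
 */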
90 static void lapic_timer_setup(enum clock_event_mode mode,
91 struct clock_event_device *evt)
92 {
93 unsigned long flags;
94 unsigned int v;
95
96 /* Lapic used as dummy for broadcast ? */
97 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
98 return;
99
100 local_irq_save(flags);
101
102 switch (mode) {
103 case CLOCK_EVT_MODE_PERIODIC:
104 case CLOCK_EVT_MODE_ONESHOT:
105 __setup_APIC_LVTT(calibration_result,
106 mode != CLOCK_EVT_MODE_PERIODIC, 1);
107 break;
108 case CLOCK_EVT_MODE_UNUSED:
109 case CLOCK_EVT_MODE_SHUTDOWN:
110 v = apic_read(APIC_LVTT);
111 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
112 apic_write(APIC_LVTT, v);
113 break;
114 case CLOCK_EVT_MODE_RESUME:
115 /* Nothing to do here */
116 break;
117 }
118
119 local_irq_restore(flags);
120 }
121
122 /*
123 * Local APIC timer broadcast function
124 */
125 static void lapic_timer_broadcast(cpumask_t mask)
126 {
127 #ifdef CONFIG_SMP
128 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
129 #endif
130 }
131
132 /*
133  * cpu_mask that denotes the CPUs that need their timer interrupt delivered as
134  * IPIs in place of local APIC timers
135 */
136 static cpumask_t timer_interrupt_broadcast_ipi_mask;
137
138 /* Using APIC to generate smp_local_timer_interrupt? */
139 int using_apic_timer __read_mostly = 0;
140
141 static void apic_pm_activate(void);
142
143 void apic_wait_icr_idle(void)
144 {
145 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
146 cpu_relax();
147 }
148
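/*
 * Bounded variant of apic_wait_icr_idle(): poll the ICR busy bit for at
 * most ~100ms (1000 * 100us) and return the last busy status so the
 * caller can report a wedged APIC instead of hanging forever.
 */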
149 unsigned int safe_apic_wait_icr_idle(void)
150 {
151 unsigned int send_status;
152 int timeout;
153
154 timeout = 0;
155 do {
156 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
157 if (!send_status)
158 break;
159 udelay(100);
160 } while (timeout++ < 1000);
161
162 return send_status;
163 }
164
165 void enable_NMI_through_LVT0 (void * dummy)
166 {
167 unsigned int v;
168
169 /* unmask and set to NMI */
170 v = APIC_DM_NMI;
171 apic_write(APIC_LVT0, v);
172 }
173
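/* Highest local vector table entry implemented by this APIC. */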
174 int get_maxlvt(void)
175 {
176 unsigned int v, maxlvt;
177
178 v = apic_read(APIC_LVR);
179 maxlvt = GET_APIC_MAXLVT(v);
180 return maxlvt;
181 }
182
183 /*
184 * 'what should we do if we get a hw irq event on an illegal vector'.
185 * each architecture has to answer this themselves.
186 */
187 void ack_bad_irq(unsigned int irq)
188 {
189 printk("unexpected IRQ trap at vector %02x\n", irq);
190 /*
191 * Currently unexpected vectors happen only on SMP and APIC.
192 * We _must_ ack these because every local APIC has only N
193 * irq slots per priority level, and a 'hanging, unacked' IRQ
194 * holds up an irq slot - in excessive cases (when multiple
195 * unexpected vectors occur) that might lock up the APIC
196 * completely.
197 * But don't ack when the APIC is disabled. -AK
198 */
199 if (!disable_apic)
200 ack_APIC_irq();
201 }
202
203 void clear_local_APIC(void)
204 {
205 int maxlvt;
206 unsigned int v;
207
208 maxlvt = get_maxlvt();
209
210 /*
211 * Masking an LVT entry can trigger a local APIC error
212 * if the vector is zero. Mask LVTERR first to prevent this.
213 */
214 if (maxlvt >= 3) {
215 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
216 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
217 }
218 /*
219  * Careful: we have to set only the mask bits first, to deassert
220  * any level-triggered sources.
221 */
222 v = apic_read(APIC_LVTT);
223 apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
224 v = apic_read(APIC_LVT0);
225 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
226 v = apic_read(APIC_LVT1);
227 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
228 if (maxlvt >= 4) {
229 v = apic_read(APIC_LVTPC);
230 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
231 }
232
233 /*
234 * Clean APIC state for other OSs:
235 */
236 apic_write(APIC_LVTT, APIC_LVT_MASKED);
237 apic_write(APIC_LVT0, APIC_LVT_MASKED);
238 apic_write(APIC_LVT1, APIC_LVT_MASKED);
239 if (maxlvt >= 3)
240 apic_write(APIC_LVTERR, APIC_LVT_MASKED);
241 if (maxlvt >= 4)
242 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
243 apic_write(APIC_ESR, 0);
244 apic_read(APIC_ESR);
245 }
246
247 void disconnect_bsp_APIC(int virt_wire_setup)
248 {
249 /* Go back to Virtual Wire compatibility mode */
250 unsigned long value;
251
252 /* For the spurious interrupt use vector F, and enable it */
253 value = apic_read(APIC_SPIV);
254 value &= ~APIC_VECTOR_MASK;
255 value |= APIC_SPIV_APIC_ENABLED;
256 value |= 0xf;
257 apic_write(APIC_SPIV, value);
258
259 if (!virt_wire_setup) {
260 /* For LVT0 make it edge triggered, active high, external and enabled */
261 value = apic_read(APIC_LVT0);
262 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
263 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
264 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
265 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
266 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
267 apic_write(APIC_LVT0, value);
268 } else {
269 /* Disable LVT0 */
270 apic_write(APIC_LVT0, APIC_LVT_MASKED);
271 }
272
273 /* For LVT1 make it edge triggered, active high, nmi and enabled */
274 value = apic_read(APIC_LVT1);
275 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
276 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
277 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
278 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
279 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
280 apic_write(APIC_LVT1, value);
281 }
282
283 void disable_local_APIC(void)
284 {
285 unsigned int value;
286
287 clear_local_APIC();
288
289 /*
290 * Disable APIC (implies clearing of registers
291 * for 82489DX!).
292 */
293 value = apic_read(APIC_SPIV);
294 value &= ~APIC_SPIV_APIC_ENABLED;
295 apic_write(APIC_SPIV, value);
296 }
297
298 /*
299 * This is to verify that we're looking at a real local APIC.
300 * Check these against your board if the CPUs aren't getting
301 * started for no apparent reason.
302 */
303 int __init verify_local_APIC(void)
304 {
305 unsigned int reg0, reg1;
306
307 /*
308 * The version register is read-only in a real APIC.
309 */
310 reg0 = apic_read(APIC_LVR);
311 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
312 apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
313 reg1 = apic_read(APIC_LVR);
314 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
315
316 /*
317 * The two version reads above should print the same
318 * numbers. If the second one is different, then we
319 * poke at a non-APIC.
320 */
321 if (reg1 != reg0)
322 return 0;
323
324 /*
325  * Check if the version looks reasonable.
326 */
327 reg1 = GET_APIC_VERSION(reg0);
328 if (reg1 == 0x00 || reg1 == 0xff)
329 return 0;
330 reg1 = get_maxlvt();
331 if (reg1 < 0x02 || reg1 == 0xff)
332 return 0;
333
334 /*
335 * The ID register is read/write in a real APIC.
336 */
337 reg0 = apic_read(APIC_ID);
338 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
339 apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
340 reg1 = apic_read(APIC_ID);
341 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
342 apic_write(APIC_ID, reg0);
343 if (reg1 != (reg0 ^ APIC_ID_MASK))
344 return 0;
345
346 /*
347 * The next two are just to see if we have sane values.
348 * They're only really relevant if we're in Virtual Wire
349  * compatibility mode, but most boxes still are these days.
350 */
351 reg0 = apic_read(APIC_LVT0);
352 apic_printk(APIC_DEBUG,"Getting LVT0: %x\n", reg0);
353 reg1 = apic_read(APIC_LVT1);
354 apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
355
356 return 1;
357 }
358
359 void __init sync_Arb_IDs(void)
360 {
361 /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
362 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
363 if (ver >= 0x14) /* P4 or higher */
364 return;
365
366 /*
367 * Wait for idle.
368 */
369 apic_wait_icr_idle();
370
371 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
372 apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
373 | APIC_DM_INIT);
374 }
375
376 /*
377 * An initial setup of the virtual wire mode.
378 */
379 void __init init_bsp_APIC(void)
380 {
381 unsigned int value;
382
383 /*
384  * Don't do the setup now if we have an SMP BIOS, as the
385 * through-I/O-APIC virtual wire mode might be active.
386 */
387 if (smp_found_config || !cpu_has_apic)
388 return;
389
390 value = apic_read(APIC_LVR);
391
392 /*
393 * Do not trust the local APIC being empty at bootup.
394 */
395 clear_local_APIC();
396
397 /*
398 * Enable APIC.
399 */
400 value = apic_read(APIC_SPIV);
401 value &= ~APIC_VECTOR_MASK;
402 value |= APIC_SPIV_APIC_ENABLED;
403 value |= APIC_SPIV_FOCUS_DISABLED;
404 value |= SPURIOUS_APIC_VECTOR;
405 apic_write(APIC_SPIV, value);
406
407 /*
408 * Set up the virtual wire mode.
409 */
410 apic_write(APIC_LVT0, APIC_DM_EXTINT);
411 value = APIC_DM_NMI;
412 apic_write(APIC_LVT1, value);
413 }
414
415 void __cpuinit setup_local_APIC (void)
416 {
417 unsigned int value, maxlvt;
418 int i, j;
419
420 value = apic_read(APIC_LVR);
421
422 BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);
423
424 /*
425 * Double-check whether this APIC is really registered.
426 * This is meaningless in clustered apic mode, so we skip it.
427 */
428 if (!apic_id_registered())
429 BUG();
430
431 /*
432 * Intel recommends to set DFR, LDR and TPR before enabling
433 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
434 * document number 292116). So here it goes...
435 */
436 init_apic_ldr();
437
438 /*
439 * Set Task Priority to 'accept all'. We never change this
440 * later on.
441 */
442 value = apic_read(APIC_TASKPRI);
443 value &= ~APIC_TPRI_MASK;
444 apic_write(APIC_TASKPRI, value);
445
446 /*
447 * After a crash, we no longer service the interrupts and a pending
448 * interrupt from previous kernel might still have ISR bit set.
449 *
450  * Most probably by now the CPU has serviced that pending interrupt and
451  * it might not have done the ack_APIC_irq() because it thought the
452  * interrupt came from i8259 as an ExtInt. The LAPIC did not get an EOI,
453  * so it does not clear the ISR bit and the CPU thinks it has already serviced
454 * the interrupt. Hence a vector might get locked. It was noticed
455 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
456 */
457 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
458 value = apic_read(APIC_ISR + i*0x10);
459 for (j = 31; j >= 0; j--) {
460 if (value & (1<<j))
461 ack_APIC_irq();
462 }
463 }
464
465 /*
466 * Now that we are all set up, enable the APIC
467 */
468 value = apic_read(APIC_SPIV);
469 value &= ~APIC_VECTOR_MASK;
470 /*
471 * Enable APIC
472 */
473 value |= APIC_SPIV_APIC_ENABLED;
474
475 /* We always use processor focus */
476
477 /*
478 * Set spurious IRQ vector
479 */
480 value |= SPURIOUS_APIC_VECTOR;
481 apic_write(APIC_SPIV, value);
482
483 /*
484 * Set up LVT0, LVT1:
485 *
486 * set up through-local-APIC on the BP's LINT0. This is not
487 * strictly necessary in pure symmetric-IO mode, but sometimes
488 * we delegate interrupts to the 8259A.
489 */
490 /*
491 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
492 */
493 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
494 if (!smp_processor_id() && !value) {
495 value = APIC_DM_EXTINT;
496 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", smp_processor_id());
497 } else {
498 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
499 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", smp_processor_id());
500 }
501 apic_write(APIC_LVT0, value);
502
503 /*
504 * only the BP should see the LINT1 NMI signal, obviously.
505 */
506 if (!smp_processor_id())
507 value = APIC_DM_NMI;
508 else
509 value = APIC_DM_NMI | APIC_LVT_MASKED;
510 apic_write(APIC_LVT1, value);
511
512 {
513 unsigned oldvalue;
514 maxlvt = get_maxlvt();
515 oldvalue = apic_read(APIC_ESR);
516 value = ERROR_APIC_VECTOR; /* enables sending errors */
517 apic_write(APIC_LVTERR, value);
518 /*
519 * spec says clear errors after enabling vector.
520 */
521 if (maxlvt > 3)
522 apic_write(APIC_ESR, 0);
523 value = apic_read(APIC_ESR);
524 if (value != oldvalue)
525 apic_printk(APIC_VERBOSE,
526 "ESR value after enabling vector: %08x, after %08x\n",
527 oldvalue, value);
528 }
529
530 nmi_watchdog_default();
531 setup_apic_nmi_watchdog(NULL);
532 apic_pm_activate();
533 }
534
535 #ifdef CONFIG_PM
536
537 static struct {
538 /* 'active' is true if the local APIC was enabled by us and
539 not the BIOS; this signifies that we are also responsible
540 for disabling it before entering apm/acpi suspend */
541 int active;
542 /* r/w apic fields */
543 unsigned int apic_id;
544 unsigned int apic_taskpri;
545 unsigned int apic_ldr;
546 unsigned int apic_dfr;
547 unsigned int apic_spiv;
548 unsigned int apic_lvtt;
549 unsigned int apic_lvtpc;
550 unsigned int apic_lvt0;
551 unsigned int apic_lvt1;
552 unsigned int apic_lvterr;
553 unsigned int apic_tmict;
554 unsigned int apic_tdcr;
555 unsigned int apic_thmr;
556 } apic_pm_state;
557
558 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
559 {
560 unsigned long flags;
561 int maxlvt;
562
563 if (!apic_pm_state.active)
564 return 0;
565
566 maxlvt = get_maxlvt();
567
568 apic_pm_state.apic_id = apic_read(APIC_ID);
569 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
570 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
571 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
572 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
573 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
574 if (maxlvt >= 4)
575 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
576 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
577 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
578 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
579 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
580 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
581 #ifdef CONFIG_X86_MCE_INTEL
582 if (maxlvt >= 5)
583 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
584 #endif
585 local_irq_save(flags);
586 disable_local_APIC();
587 local_irq_restore(flags);
588 return 0;
589 }
590
591 static int lapic_resume(struct sys_device *dev)
592 {
593 unsigned int l, h;
594 unsigned long flags;
595 int maxlvt;
596
597 if (!apic_pm_state.active)
598 return 0;
599
600 maxlvt = get_maxlvt();
601
602 local_irq_save(flags);
603 rdmsr(MSR_IA32_APICBASE, l, h);
604 l &= ~MSR_IA32_APICBASE_BASE;
605 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
606 wrmsr(MSR_IA32_APICBASE, l, h);
607 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
608 apic_write(APIC_ID, apic_pm_state.apic_id);
609 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
610 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
611 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
612 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
613 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
614 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
615 #ifdef CONFIG_X86_MCE_INTEL
616 if (maxlvt >= 5)
617 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
618 #endif
619 if (maxlvt >= 4)
620 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
621 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
622 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
623 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
624 apic_write(APIC_ESR, 0);
625 apic_read(APIC_ESR);
626 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
627 apic_write(APIC_ESR, 0);
628 apic_read(APIC_ESR);
629 local_irq_restore(flags);
630 return 0;
631 }
632
633 static struct sysdev_class lapic_sysclass = {
634 set_kset_name("lapic"),
635 .resume = lapic_resume,
636 .suspend = lapic_suspend,
637 };
638
639 static struct sys_device device_lapic = {
640 .id = 0,
641 .cls = &lapic_sysclass,
642 };
643
644 static void __cpuinit apic_pm_activate(void)
645 {
646 apic_pm_state.active = 1;
647 }
648
649 static int __init init_lapic_sysfs(void)
650 {
651 int error;
652 if (!cpu_has_apic)
653 return 0;
654 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
655 error = sysdev_class_register(&lapic_sysclass);
656 if (!error)
657 error = sysdev_register(&device_lapic);
658 return error;
659 }
660 device_initcall(init_lapic_sysfs);
661
662 #else /* CONFIG_PM */
663
664 static void apic_pm_activate(void) { }
665
666 #endif /* CONFIG_PM */
667
668 static int __init apic_set_verbosity(char *str)
669 {
670 if (str == NULL) {
671 skip_ioapic_setup = 0;
672 ioapic_force = 1;
673 return 0;
674 }
675 if (strcmp("debug", str) == 0)
676 apic_verbosity = APIC_DEBUG;
677 else if (strcmp("verbose", str) == 0)
678 apic_verbosity = APIC_VERBOSE;
679 else {
680 printk(KERN_WARNING "APIC Verbosity level %s not recognised,"
681 " use apic=verbose or apic=debug\n", str);
682 return -EINVAL;
683 }
684
685 return 0;
686 }
687 early_param("apic", apic_set_verbosity);
688
689 /*
690 * Detect and enable local APICs on non-SMP boards.
691 * Original code written by Keir Fraser.
692 * On AMD64 we trust the BIOS - if it says no APIC it is likely
693 * not correctly set up (usually the APIC timer won't work etc.)
694 */
695
696 static int __init detect_init_APIC (void)
697 {
698 if (!cpu_has_apic) {
699 printk(KERN_INFO "No local APIC present\n");
700 return -1;
701 }
702
703 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
704 boot_cpu_id = 0;
705 return 0;
706 }
707
708 #ifdef CONFIG_X86_IO_APIC
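/*
 * Carve a single bootmem allocation into nr_ioapics struct resource
 * entries plus room for their "IOAPIC <n>" name strings; the start/end
 * addresses are filled in later by init_apic_mappings().
 */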
709 static struct resource * __init ioapic_setup_resources(void)
710 {
711 #define IOAPIC_RESOURCE_NAME_SIZE 11
712 unsigned long n;
713 struct resource *res;
714 char *mem;
715 int i;
716
717 if (nr_ioapics <= 0)
718 return NULL;
719
720 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
721 n *= nr_ioapics;
722
723 mem = alloc_bootmem(n);
724 res = (void *)mem;
725
726 if (mem != NULL) {
727 memset(mem, 0, n);
728 mem += sizeof(struct resource) * nr_ioapics;
729
730 for (i = 0; i < nr_ioapics; i++) {
731 res[i].name = mem;
732 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
733 sprintf(mem, "IOAPIC %u", i);
734 mem += IOAPIC_RESOURCE_NAME_SIZE;
735 }
736 }
737
738 ioapic_resources = res;
739
740 return res;
741 }
742
743 static int __init ioapic_insert_resources(void)
744 {
745 int i;
746 struct resource *r = ioapic_resources;
747
748 if (!r) {
749 printk("IO APIC resources could be not be allocated.\n");
750 return -1;
751 }
752
753 for (i = 0; i < nr_ioapics; i++) {
754 insert_resource(&iomem_resource, r);
755 r++;
756 }
757
758 return 0;
759 }
760
761 /* Insert the IO APIC resources after PCI initialization has occurred to handle
762  * IO APICs that are mapped in on a BAR in PCI space. */
763 late_initcall(ioapic_insert_resources);
764 #endif
765
766 void __init init_apic_mappings(void)
767 {
768 unsigned long apic_phys;
769
770 /*
771 * If no local APIC can be found then set up a fake all
772 * zeroes page to simulate the local APIC and another
773 * one for the IO-APIC.
774 */
775 if (!smp_found_config && detect_init_APIC()) {
776 apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
777 apic_phys = __pa(apic_phys);
778 } else
779 apic_phys = mp_lapic_addr;
780
781 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
782 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
783 APIC_BASE, apic_phys);
784
785 /* Put local APIC into the resource map. */
786 lapic_resource.start = apic_phys;
787 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
788 insert_resource(&iomem_resource, &lapic_resource);
789
790 /*
791 * Fetch the APIC ID of the BSP in case we have a
792 * default configuration (or the MP table is broken).
793 */
794 boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
795
796 {
797 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
798 int i;
799 struct resource *ioapic_res;
800
801 ioapic_res = ioapic_setup_resources();
802 for (i = 0; i < nr_ioapics; i++) {
803 if (smp_found_config) {
804 ioapic_phys = mp_ioapics[i].mpc_apicaddr;
805 } else {
806 ioapic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
807 ioapic_phys = __pa(ioapic_phys);
808 }
809 set_fixmap_nocache(idx, ioapic_phys);
810 apic_printk(APIC_VERBOSE,"mapped IOAPIC to %016lx (%016lx)\n",
811 __fix_to_virt(idx), ioapic_phys);
812 idx++;
813
814 if (ioapic_res != NULL) {
815 ioapic_res->start = ioapic_phys;
816 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
817 ioapic_res++;
818 }
819 }
820 }
821 }
822
823 /*
824 * This function sets up the local APIC timer, with a timeout of
825  * 'clocks' APIC bus clocks. During calibration we actually call
826 * this function twice on the boot CPU, once with a bogus timeout
827 * value, second time for real. The other (noncalibrating) CPUs
828 * call this function only once, with the real, calibrated value.
829 *
830 * We do reads before writes even if unnecessary, to get around the
831 * P5 APIC double write bug.
832 */
833
834 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
835 {
836 unsigned int lvtt_value, tmp_value;
837
838 lvtt_value = LOCAL_TIMER_VECTOR;
839 if (!oneshot)
840 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
841 if (!irqen)
842 lvtt_value |= APIC_LVT_MASKED;
843
844 apic_write(APIC_LVTT, lvtt_value);
845
846 /*
847 * Divide PICLK by 16
848 */
849 tmp_value = apic_read(APIC_TDCR);
850 apic_write(APIC_TDCR, (tmp_value
851 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
852 | APIC_TDR_DIV_16);
853
854 if (!oneshot)
855 apic_write(APIC_TMICT, clocks);
856 }
857
858 static void setup_APIC_timer(void)
859 {
860 unsigned long flags;
861 int irqen;
862
863 local_irq_save(flags);
864
865 irqen = ! cpu_isset(smp_processor_id(),
866 timer_interrupt_broadcast_ipi_mask);
867 __setup_APIC_LVTT(calibration_result, 0, irqen);
868 /* Turn off the PIT interrupt if we use the APIC timer as the main timer.
869    Only works with the PM timer right now;
870    TBD: fix it for HPET too. */
871 if ((pmtmr_ioport != 0) &&
872 smp_processor_id() == boot_cpu_id &&
873 apic_runs_main_timer == 1 &&
874 !cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
875 stop_timer_interrupt();
876 apic_runs_main_timer++;
877 }
878 local_irq_restore(flags);
879 }
880
881 /*
882 * In this function we calibrate APIC bus clocks to the external
883 * timer. Unfortunately we cannot use jiffies and the timer irq
884 * to calibrate, since some later bootup code depends on getting
885 * the first irq? Ugh.
886 *
887 * We want to do the calibration only once since we
888  * want to have the local timer irqs in sync. CPUs connected
889 * by the same APIC bus have the very same bus frequency.
890 * And we want to have irqs off anyways, no accidental
891 * APIC irq that way.
892 */
893
894 #define TICK_COUNT 100000000
895
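/*
 * Illustrative numbers only: with a 200 MHz APIC bus clock and the
 * divide-by-16 setting from __setup_APIC_LVTT() the timer ticks at
 * 12.5 MHz, i.e. result ~= 12,500,000. That yields
 * mult ~= 12.5e6 * 2^32 / 1e9 ~= 53,687,091, a max_delta_ns of about
 * 0x7FFFFF * 80ns ~= 0.67s, and (at HZ=250) calibration_result = 50,000
 * timer ticks per tick period.
 */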
896 static void __init calibrate_APIC_clock(void)
897 {
898 unsigned apic, apic_start;
899 unsigned long tsc, tsc_start;
900 int result;
901
902 local_irq_disable();
903
904 /*
905 * Put whatever arbitrary (but long enough) timeout
906 * value into the APIC clock, we just want to get the
907 * counter running for calibration.
908 *
909 * No interrupt enable !
910 */
911 __setup_APIC_LVTT(250000000, 0, 0);
912
913 apic_start = apic_read(APIC_TMCCT);
914 #ifdef CONFIG_X86_PM_TIMER
915 if (apic_calibrate_pmtmr && pmtmr_ioport) {
916 pmtimer_wait(5000); /* 5ms wait */
917 apic = apic_read(APIC_TMCCT);
918 result = (apic_start - apic) * 1000L / 5;
919 } else
920 #endif
921 {
922 rdtscll(tsc_start);
923
924 do {
925 apic = apic_read(APIC_TMCCT);
926 rdtscll(tsc);
927 } while ((tsc - tsc_start) < TICK_COUNT &&
928 (apic_start - apic) < TICK_COUNT);
929
930 result = (apic_start - apic) * 1000L * tsc_khz /
931 (tsc - tsc_start);
932 }
933
934 local_irq_enable();
935
936 printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
937
938 printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
939 result / 1000 / 1000, result / 1000 % 1000);
940
941 /* Calculate the scaled math multiplication factor */
942 lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
943 lapic_clockevent.max_delta_ns =
944 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
945 lapic_clockevent.min_delta_ns =
946 clockevent_delta2ns(0xF, &lapic_clockevent);
947
948 calibration_result = result / HZ;
949 }
950
951 void __init setup_boot_APIC_clock (void)
952 {
953 if (disable_apic_timer) {
954 printk(KERN_INFO "Disabling APIC timer\n");
955 return;
956 }
957
958 printk(KERN_INFO "Using local APIC timer interrupts.\n");
959 using_apic_timer = 1;
960
961 calibrate_APIC_clock();
962 /*
963 * Now set up the timer for real.
964 */
965 setup_APIC_timer();
966 }
967
968 void __cpuinit setup_secondary_APIC_clock(void)
969 {
970 setup_APIC_timer();
971 }
972
973 void disable_APIC_timer(void)
974 {
975 if (using_apic_timer) {
976 unsigned long v;
977
978 v = apic_read(APIC_LVTT);
979 /*
980 * When an illegal vector value (0-15) is written to an LVT
981 * entry and delivery mode is Fixed, the APIC may signal an
982  * illegal vector error, without regard to whether the mask
983 * bit is set or whether an interrupt is actually seen on input.
984 *
985  * The boot sequence might call this function when the LVTT has
986  * a '0' vector value. So make sure the vector field is set to
987  * a valid value.
988 */
989 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
990 apic_write(APIC_LVTT, v);
991 }
992 }
993
994 void enable_APIC_timer(void)
995 {
996 int cpu = smp_processor_id();
997
998 if (using_apic_timer &&
999 !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
1000 unsigned long v;
1001
1002 v = apic_read(APIC_LVTT);
1003 apic_write(APIC_LVTT, v & ~APIC_LVT_MASKED);
1004 }
1005 }
1006
1007 void switch_APIC_timer_to_ipi(void *cpumask)
1008 {
1009 cpumask_t mask = *(cpumask_t *)cpumask;
1010 int cpu = smp_processor_id();
1011
1012 if (cpu_isset(cpu, mask) &&
1013 !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
1014 disable_APIC_timer();
1015 cpu_set(cpu, timer_interrupt_broadcast_ipi_mask);
1016 }
1017 }
1018 EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
1019
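/*
 * Tick broadcast: deliver the local timer tick directly on this CPU if it
 * is in the broadcast mask, and send LOCAL_TIMER_VECTOR IPIs to the other
 * online CPUs whose APIC timer has been switched off.
 */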
1020 void smp_send_timer_broadcast_ipi(void)
1021 {
1022 int cpu = smp_processor_id();
1023 cpumask_t mask;
1024
1025 cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
1026
1027 if (cpu_isset(cpu, mask)) {
1028 cpu_clear(cpu, mask);
1029 add_pda(apic_timer_irqs, 1);
1030 smp_local_timer_interrupt();
1031 }
1032
1033 if (!cpus_empty(mask)) {
1034 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
1035 }
1036 }
1037
1038 void switch_ipi_to_APIC_timer(void *cpumask)
1039 {
1040 cpumask_t mask = *(cpumask_t *)cpumask;
1041 int cpu = smp_processor_id();
1042
1043 if (cpu_isset(cpu, mask) &&
1044 cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
1045 cpu_clear(cpu, timer_interrupt_broadcast_ipi_mask);
1046 enable_APIC_timer();
1047 }
1048 }
1049 EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
1050
1051 int setup_profiling_timer(unsigned int multiplier)
1052 {
1053 return -EINVAL;
1054 }
1055
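/*
 * Program one of the AMD K8 extended LVT registers. They are laid out at
 * 0x10-byte strides above K8_APIC_EXT_LVT_BASE and pack their fields as
 * (mask << 16) | (msg_type << 8) | vector.
 */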
1056 void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
1057 unsigned char msg_type, unsigned char mask)
1058 {
1059 unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
1060 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
1061 apic_write(reg, v);
1062 }
1063
1064 /*
1065 * Local timer interrupt handler. It does both profiling and
1066 * process statistics/rescheduling.
1067 *
1068 * We do profiling in every local tick, statistics/rescheduling
1069 * happen only every 'profiling multiplier' ticks. The default
1070 * multiplier is 1 and it can be changed by writing the new multiplier
1071 * value into /proc/profile.
1072 */
1073
1074 void smp_local_timer_interrupt(void)
1075 {
1076 profile_tick(CPU_PROFILING);
1077 #ifdef CONFIG_SMP
1078 update_process_times(user_mode(get_irq_regs()));
1079 #endif
1080 if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id)
1081 main_timer_handler();
1082 /*
1083 * We take the 'long' return path, and there every subsystem
1084 * grabs the appropriate locks (kernel lock/ irq lock).
1085 *
1086 * We might want to decouple profiling from the 'long path',
1087 * and do the profiling totally in assembly.
1088 *
1089 * Currently this isn't too much of an issue (performance wise),
1090 * we can take more than 100K local irqs per second on a 100 MHz P5.
1091 */
1092 }
1093
1094 /*
1095 * Local APIC timer interrupt. This is the most natural way for doing
1096 * local interrupts, but local timer interrupts can be emulated by
1097 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
1098 *
1099 * [ if a single-CPU system runs an SMP kernel then we call the local
1100 * interrupt as well. Thus we cannot inline the local irq ... ]
1101 */
1102 void smp_apic_timer_interrupt(struct pt_regs *regs)
1103 {
1104 struct pt_regs *old_regs = set_irq_regs(regs);
1105
1106 /*
1107 * the NMI deadlock-detector uses this.
1108 */
1109 add_pda(apic_timer_irqs, 1);
1110
1111 /*
1112 * NOTE! We'd better ACK the irq immediately,
1113 * because timer handling can be slow.
1114 */
1115 ack_APIC_irq();
1116 /*
1117 * update_process_times() expects us to have done irq_enter().
1118  * Besides, if we don't, timer interrupts ignore the global
1119 * interrupt lock, which is the WrongThing (tm) to do.
1120 */
1121 exit_idle();
1122 irq_enter();
1123 smp_local_timer_interrupt();
1124 irq_exit();
1125 set_irq_regs(old_regs);
1126 }
1127
1128 /*
1129 * apic_is_clustered_box() -- Check if we can expect good TSC
1130 *
1131 * Thus far, the major user of this is IBM's Summit2 series:
1132 *
1133 * Clustered boxes may have unsynced TSC problems if they are
1134 * multi-chassis. Use available data to take a good guess.
1135 * If in doubt, go HPET.
1136 */
1137 __cpuinit int apic_is_clustered_box(void)
1138 {
1139 int i, clusters, zeros;
1140 unsigned id;
1141 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
1142
1143 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
1144
1145 for (i = 0; i < NR_CPUS; i++) {
1146 id = bios_cpu_apicid[i];
1147 if (id != BAD_APICID)
1148 __set_bit(APIC_CLUSTERID(id), clustermap);
1149 }
1150
1151 /* Problem: Partially populated chassis may not have CPUs in some of
1152 * the APIC clusters they have been allocated. Only present CPUs have
1153 * bios_cpu_apicid entries, thus causing zeroes in the bitmap. Since
1154 * clusters are allocated sequentially, count zeros only if they are
1155 * bounded by ones.
1156 */
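/* Example: populated clusters {0, 1, 3} also count the gap at 2,
 * giving clusters == 4, so the box is treated as multi-chassis. */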
1157 clusters = 0;
1158 zeros = 0;
1159 for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
1160 if (test_bit(i, clustermap)) {
1161 clusters += 1 + zeros;
1162 zeros = 0;
1163 } else
1164 ++zeros;
1165 }
1166
1167 /*
1168  * If clusters > 2, then it should be multi-chassis.
1169 * May have to revisit this when multi-core + hyperthreaded CPUs come
1170 * out, but AFAIK this will work even for them.
1171 */
1172 return (clusters > 2);
1173 }
1174
1175 /*
1176 * This interrupt should _never_ happen with our APIC/SMP architecture
1177 */
1178 asmlinkage void smp_spurious_interrupt(void)
1179 {
1180 unsigned int v;
1181 exit_idle();
1182 irq_enter();
1183 /*
1184 * Check if this really is a spurious interrupt and ACK it
1185 * if it is a vectored one. Just in case...
1186 * Spurious interrupts should not be ACKed.
1187 */
1188 v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
1189 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1190 ack_APIC_irq();
1191
1192 irq_exit();
1193 }
1194
1195 /*
1196 * This interrupt should never happen with our APIC/SMP architecture
1197 */
1198
1199 asmlinkage void smp_error_interrupt(void)
1200 {
1201 unsigned int v, v1;
1202
1203 exit_idle();
1204 irq_enter();
1205 /* First tickle the hardware, only then report what went on. -- REW */
1206 v = apic_read(APIC_ESR);
1207 apic_write(APIC_ESR, 0);
1208 v1 = apic_read(APIC_ESR);
1209 ack_APIC_irq();
1210 atomic_inc(&irq_err_count);
1211
1212 /* Here is what the APIC error bits mean:
1213 0: Send CS error
1214 1: Receive CS error
1215 2: Send accept error
1216 3: Receive accept error
1217 4: Reserved
1218 5: Send illegal vector
1219 6: Received illegal vector
1220 7: Illegal register address
1221 */
1222 printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
1223 smp_processor_id(), v , v1);
1224 irq_exit();
1225 }
1226
1227 int disable_apic;
1228
1229 /*
1230 * This initializes the IO-APIC and APIC hardware if this is
1231 * a UP kernel.
1232 */
1233 int __init APIC_init_uniprocessor (void)
1234 {
1235 if (disable_apic) {
1236 printk(KERN_INFO "Apic disabled\n");
1237 return -1;
1238 }
1239 if (!cpu_has_apic) {
1240 disable_apic = 1;
1241 printk(KERN_INFO "Apic disabled by BIOS\n");
1242 return -1;
1243 }
1244
1245 verify_local_APIC();
1246
1247 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
1248 apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
1249
1250 setup_local_APIC();
1251
1252 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1253 setup_IO_APIC();
1254 else
1255 nr_ioapics = 0;
1256 setup_boot_APIC_clock();
1257 check_nmi_watchdog();
1258 return 0;
1259 }
1260
1261 static __init int setup_disableapic(char *str)
1262 {
1263 disable_apic = 1;
1264 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
1265 return 0;
1266 }
1267 early_param("disableapic", setup_disableapic);
1268
1269 /* same as disableapic, for compatibility */
1270 static __init int setup_nolapic(char *str)
1271 {
1272 return setup_disableapic(str);
1273 }
1274 early_param("nolapic", setup_nolapic);
1275
1276 static int __init parse_lapic_timer_c2_ok(char *arg)
1277 {
1278 local_apic_timer_c2_ok = 1;
1279 return 0;
1280 }
1281 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1282
1283 static __init int setup_noapictimer(char *str)
1284 {
1285 if (str[0] != ' ' && str[0] != 0)
1286 return 0;
1287 disable_apic_timer = 1;
1288 return 1;
1289 }
1290
1291 static __init int setup_apicmaintimer(char *str)
1292 {
1293 apic_runs_main_timer = 1;
1294 nohpet = 1;
1295 return 1;
1296 }
1297 __setup("apicmaintimer", setup_apicmaintimer);
1298
1299 static __init int setup_noapicmaintimer(char *str)
1300 {
1301 apic_runs_main_timer = -1;
1302 return 1;
1303 }
1304 __setup("noapicmaintimer", setup_noapicmaintimer);
1305
1306 static __init int setup_apicpmtimer(char *s)
1307 {
1308 apic_calibrate_pmtmr = 1;
1309 notsc_setup(NULL);
1310 return setup_apicmaintimer(NULL);
1311 }
1312 __setup("apicpmtimer", setup_apicpmtimer);
1313
1314 __setup("noapictimer", setup_noapictimer);
1315