[PATCH] x86_64: actively synchronize vmalloc area when registering certain callbacks
[deliverable/linux.git] / arch/x86_64/kernel/nmi.c
/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson : Power Management for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson : PM converted to driver model. Disable/enable API.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>

#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/local.h>

/*
 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
 * - it may be reserved by some other driver, or not
 * - when not reserved by some other driver, it may be used for
 *   the NMI watchdog, or not
 *
 * This is maintained separately from nmi_active because the NMI
 * watchdog may also be driven from the I/O APIC timer.
 */
static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG (1<<0)
#define LAPIC_NMI_RESERVED (1<<1)

/* nmi_active:
 * +1: the lapic NMI watchdog is active, but can be disabled
 *  0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 * -1: the lapic NMI watchdog is disabled, but can be enabled
 */
int nmi_active;		/* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;
static unsigned int nmi_perfctr_msr;	/* the MSR to reset in NMI handler */
static unsigned int nmi_p4_cccr_val;

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE (1 << 22)
#define K7_EVNTSEL_INT (1 << 20)
#define K7_EVNTSEL_OS (1 << 17)
#define K7_EVNTSEL_USR (1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

#define MSR_P4_MISC_ENABLE 0x1A0
#define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7)
#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
#define MSR_P4_PERFCTR0 0x300
#define MSR_P4_CCCR0 0x360
#define P4_ESCR_EVENT_SELECT(N) ((N)<<25)
#define P4_ESCR_OS (1<<3)
#define P4_ESCR_USR (1<<2)
#define P4_CCCR_OVF_PMI0 (1<<26)
#define P4_CCCR_OVF_PMI1 (1<<27)
#define P4_CCCR_THRESHOLD(N) ((N)<<20)
#define P4_CCCR_COMPLEMENT (1<<19)
#define P4_CCCR_COMPARE (1<<18)
#define P4_CCCR_REQUIRED (3<<16)
#define P4_CCCR_ESCR_SELECT(N) ((N)<<13)
#define P4_CCCR_ENABLE (1<<12)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
#define MSR_P4_IQ_COUNTER0 0x30C
#define P4_NMI_CRU_ESCR0 (P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS|P4_ESCR_USR)
#define P4_NMI_IQ_CCCR0 \
        (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
         P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)

static __cpuinit inline int nmi_known_cpu(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                return boot_cpu_data.x86 == 15;
        case X86_VENDOR_INTEL:
                return boot_cpu_data.x86 == 15;
        }
        return 0;
}

/* Run after command line and cpu_init init, but before all other checks */
void __cpuinit nmi_watchdog_default(void)
{
        if (nmi_watchdog != NMI_DEFAULT)
                return;
        if (nmi_known_cpu())
                nmi_watchdog = NMI_LOCAL_APIC;
        else
                nmi_watchdog = NMI_IO_APIC;
}

#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
        volatile int *endflag = data;
        local_irq_enable();
        /* Intentionally don't use cpu_relax here. This is
           to make sure that the performance counter really ticks,
           even if there is a simulator or similar that catches the
           pause instruction. On a real HT machine this is fine because
           all other CPUs are busy with "useless" delay loops and don't
           care if they get somewhat less cycles. */
        while (*endflag == 0)
                barrier();
}
#endif

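/*
 * Boot-time self-test: snapshot each CPU's NMI count, keep the CPUs
 * busy (the perfctr-based watchdog doesn't tick in the idle loop),
 * wait roughly ten watchdog periods, then declare stuck any online
 * CPU whose count advanced by five NMIs or fewer.
 */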
int __init check_nmi_watchdog (void)
{
        volatile int endflag = 0;
        int *counts;
        int cpu;

        counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
        if (!counts)
                return -1;

        printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
        if (nmi_watchdog == NMI_LOCAL_APIC)
                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                counts[cpu] = cpu_pda(cpu)->__nmi_count;
        local_irq_enable();
        mdelay((10*1000)/nmi_hz); /* wait 10 ticks */

        for_each_online_cpu(cpu) {
                if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
                        endflag = 1;
                        printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
                               cpu,
                               counts[cpu],
                               cpu_pda(cpu)->__nmi_count);
                        nmi_active = 0;
                        lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
                        nmi_perfctr_msr = 0;
                        kfree(counts);
                        return -1;
                }
        }
        endflag = 1;
        printk("OK.\n");

        /* now that we know it works we can reduce NMI frequency to
           something more reasonable; makes a difference in some configs */
        if (nmi_watchdog == NMI_LOCAL_APIC)
                nmi_hz = 1;

        kfree(counts);
        return 0;
}

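/*
 * Parse the "nmi_watchdog=" boot option: an optional leading "panic,"
 * makes a watchdog timeout panic the box, and the number that follows
 * selects the mode (NMI_NONE, NMI_IO_APIC or NMI_LOCAL_APIC; anything
 * >= NMI_INVALID is rejected).
 */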
int __init setup_nmi_watchdog(char *str)
{
        int nmi;

        if (!strncmp(str,"panic",5)) {
                panic_on_timeout = 1;
                str = strchr(str, ',');
                if (!str)
                        return 1;
                ++str;
        }

        get_option(&str, &nmi);

        if (nmi >= NMI_INVALID)
                return 0;
        nmi_watchdog = nmi;
        return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);

static void disable_lapic_nmi_watchdog(void)
{
        if (nmi_active <= 0)
                return;
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                wrmsr(MSR_K7_EVNTSEL0, 0, 0);
                break;
        case X86_VENDOR_INTEL:
                if (boot_cpu_data.x86 == 15) {
                        wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
                        wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
                }
                break;
        }
        nmi_active = -1;
        /* tell do_nmi() and others that we're not active any more */
        nmi_watchdog = 0;
}

static void enable_lapic_nmi_watchdog(void)
{
        if (nmi_active < 0) {
                nmi_watchdog = NMI_LOCAL_APIC;
                touch_nmi_watchdog();
                setup_apic_nmi_watchdog();
        }
}

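/*
 * The lapic NMI hardware is shared with other users (notably oprofile).
 * reserve_lapic_nmi() hands exclusive ownership to the caller, turning
 * the watchdog off if it was running; release_lapic_nmi() gives it back
 * and re-enables the watchdog if it owned the lapic before.
 */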
int reserve_lapic_nmi(void)
{
        unsigned int old_owner;

        spin_lock(&lapic_nmi_owner_lock);
        old_owner = lapic_nmi_owner;
        lapic_nmi_owner |= LAPIC_NMI_RESERVED;
        spin_unlock(&lapic_nmi_owner_lock);
        if (old_owner & LAPIC_NMI_RESERVED)
                return -EBUSY;
        if (old_owner & LAPIC_NMI_WATCHDOG)
                disable_lapic_nmi_watchdog();
        return 0;
}

void release_lapic_nmi(void)
{
        unsigned int new_owner;

        spin_lock(&lapic_nmi_owner_lock);
        new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
        lapic_nmi_owner = new_owner;
        spin_unlock(&lapic_nmi_owner_lock);
        if (new_owner & LAPIC_NMI_WATCHDOG)
                enable_lapic_nmi_watchdog();
}

void disable_timer_nmi_watchdog(void)
{
        if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
                return;

        disable_irq(0);
        unset_nmi_callback();
        nmi_active = -1;
        nmi_watchdog = NMI_NONE;
}

void enable_timer_nmi_watchdog(void)
{
        if (nmi_active < 0) {
                nmi_watchdog = NMI_IO_APIC;
                touch_nmi_watchdog();
                nmi_active = 1;
                enable_irq(0);
        }
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        nmi_pm_active = nmi_active;
        disable_lapic_nmi_watchdog();
        return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
        if (nmi_pm_active > 0)
                enable_lapic_nmi_watchdog();
        return 0;
}

static struct sysdev_class nmi_sysclass = {
        set_kset_name("lapic_nmi"),
        .resume  = lapic_nmi_resume,
        .suspend = lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
        .id  = 0,
        .cls = &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
        int error;

        if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
                return 0;

        error = sysdev_class_register(&nmi_sysclass);
        if (!error)
                error = sysdev_register(&device_lapic_nmi);
        return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif /* CONFIG_PM */

/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

static void clear_msr_range(unsigned int base, unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; ++i)
                wrmsr(base+i, 0, 0);
}

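/*
 * Program K7/K8 PERFCTR0 to count unhalted processor cycles and load it
 * with -(cpu_khz * 1000 / nmi_hz), so that it overflows -- and raises
 * an NMI through the APIC performance counter LVT entry -- about
 * nmi_hz times per second.
 */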
static void setup_k7_watchdog(void)
{
        int i;
        unsigned int evntsel;

        nmi_perfctr_msr = MSR_K7_PERFCTR0;

        for (i = 0; i < 4; ++i) {
                /* Simulator may not support it */
                if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL)) {
                        nmi_perfctr_msr = 0;
                        return;
                }
                wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
        }

        evntsel = K7_EVNTSEL_INT
                | K7_EVNTSEL_OS
                | K7_EVNTSEL_USR
                | K7_NMI_EVENT;

        wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
        wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= K7_EVNTSEL_ENABLE;
        wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}

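/*
 * P4 variant: clear out the perfmon MSR space, then set up IQ_COUNTER0
 * as the "clock" described above (CRU_ESCR0 filtered through a
 * complemented max threshold).  On HT parts the overflow PMI is routed
 * to both logical CPUs.  Returns 0 if the CPU advertises no usable
 * performance counters.
 */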
static int setup_p4_watchdog(void)
{
        unsigned int misc_enable, dummy;

        rdmsr(MSR_P4_MISC_ENABLE, misc_enable, dummy);
        if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
                return 0;

        nmi_perfctr_msr = MSR_P4_IQ_COUNTER0;
        nmi_p4_cccr_val = P4_NMI_IQ_CCCR0;
#ifdef CONFIG_SMP
        if (smp_num_siblings == 2)
                nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
#endif

        if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
                clear_msr_range(0x3F1, 2);
        /* MSR 0x3F0 seems to have a default value of 0xFC00, but the
           current docs don't fully define it, so leave it alone for now. */
        if (boot_cpu_data.x86_model >= 0x3) {
                /* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
                clear_msr_range(0x3A0, 26);
                clear_msr_range(0x3BC, 3);
        } else {
                clear_msr_range(0x3A0, 31);
        }
        clear_msr_range(0x3C0, 6);
        clear_msr_range(0x3C8, 6);
        clear_msr_range(0x3E0, 2);
        clear_msr_range(MSR_P4_CCCR0, 18);
        clear_msr_range(MSR_P4_PERFCTR0, 18);

        wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
        wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
        Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz * 1000UL / nmi_hz));
        wrmsrl(MSR_P4_IQ_COUNTER0, -((u64)cpu_khz * 1000 / nmi_hz));
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
        return 1;
}

void setup_apic_nmi_watchdog(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                if (boot_cpu_data.x86 != 15)
                        return;
                if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
                        return;
                setup_k7_watchdog();
                break;
        case X86_VENDOR_INTEL:
                if (boot_cpu_data.x86 != 15)
                        return;
                if (!setup_p4_watchdog())
                        return;
                break;

        default:
                return;
        }
        lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
        nmi_active = 1;
}

/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog (void)
{
        if (nmi_watchdog > 0) {
                unsigned cpu;

                /*
                 * Tell other CPUs to reset their alert counters. We cannot
                 * do it ourselves because the alert count increase is not
                 * atomic.
                 */
                for_each_present_cpu (cpu)
                        per_cpu(nmi_touch, cpu) = 1;
        }

        touch_softlockup_watchdog();
}

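/*
 * Runs in NMI context on every watchdog NMI.  If the local APIC timer
 * IRQ count hasn't moved for 5*nmi_hz consecutive ticks (about five
 * seconds) and nobody touched the watchdog meanwhile, the CPU is
 * declared locked up.
 */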
void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
        int sum;
        int touched = 0;

        sum = read_pda(apic_timer_irqs);
        if (__get_cpu_var(nmi_touch)) {
                __get_cpu_var(nmi_touch) = 0;
                touched = 1;
        }
        if (!touched && __get_cpu_var(last_irq_sum) == sum) {
                /*
                 * Ayiee, looks like this CPU is stuck ...
                 * wait a few IRQs (5 seconds) before doing the oops ...
                 */
                local_inc(&__get_cpu_var(alert_counter));
                if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz) {
                        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
                                                        == NOTIFY_STOP) {
                                local_set(&__get_cpu_var(alert_counter), 0);
                                return;
                        }
                        die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
                }
        } else {
                __get_cpu_var(last_irq_sum) = sum;
                local_set(&__get_cpu_var(alert_counter), 0);
        }
        if (nmi_perfctr_msr) {
                if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
                        /*
                         * P4 quirks:
                         * - An overflown perfctr will assert its interrupt
                         *   until the OVF flag in its CCCR is cleared.
                         * - LVTPC is masked on interrupt and must be
                         *   unmasked by the LVTPC handler.
                         */
                        wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
                        apic_write(APIC_LVTPC, APIC_DM_NMI);
                }
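                /* Rearm: reload the counter with a negative count so it
                   overflows again and raises the next watchdog NMI roughly
                   1/nmi_hz seconds from now. */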
                wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
        }
}

static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
        return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

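/*
 * C entry point for the NMI vector.  The registered callback gets the
 * first look at the NMI; only if it returns 0 does the default handler
 * run.  The callback pointer is read with RCU semantics, so it can be
 * switched without locking in this path.
 */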
asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu = safe_smp_processor_id();

        nmi_enter();
        add_pda(__nmi_count,1);
        if (!rcu_dereference(nmi_callback)(regs, cpu))
                default_do_nmi(regs);
        nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
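        /*
         * The callback may live in a module, i.e. in the vmalloc area,
         * whose mappings are normally propagated into a task's page
         * tables lazily, from the page fault handler.  A fault cannot
         * be taken safely in NMI context, so synchronize all page
         * tables up front, before the callback can be invoked.
         */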
        vmalloc_sync_all();
        rcu_assign_pointer(nmi_callback, callback);
}

void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}

#ifdef CONFIG_SYSCTL

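/*
 * Die on any NMI whose reason byte flags neither a memory parity error
 * nor an I/O check error (bits 7 and 6 of the NMI status port), i.e.
 * an NMI with no identifiable hardware source.
 */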
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
        unsigned char reason = get_nmi_reason();
        char buf[64];

        if (!(reason & 0xc0)) {
                sprintf(buf, "NMI received for unknown reason %02x\n", reason);
                die_nmi(buf,regs);
        }
        return 0;
}

/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        int old_state;

        old_state = unknown_nmi_panic;
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (!!old_state == !!unknown_nmi_panic)
                return 0;

        if (unknown_nmi_panic) {
                if (reserve_lapic_nmi() < 0) {
                        unknown_nmi_panic = 0;
                        return -EBUSY;
                } else {
                        set_nmi_callback(unknown_nmi_panic_callback);
                }
        } else {
                release_lapic_nmi();
                unset_nmi_callback();
        }
        return 0;
}

#endif

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);