/*
 * linux/arch/x86_64/nmi.c
 *
 * NMI watchdog support on APIC systems
 *
 * Started by Ingo Molnar <mingo@redhat.com>
 *
 * Fixes:
 * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
 * Mikael Pettersson : Power Management for local APIC NMI watchdog.
 * Pavel Machek and
 * Mikael Pettersson : PM converted to driver model. Disable/enable API.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>

#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/proto.h>
#include <asm/kdebug.h>

/*
 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
 * - it may be reserved by some other driver, or not
 * - when not reserved by some other driver, it may be used for
 *   the NMI watchdog, or not
 *
 * This is maintained separately from nmi_active because the NMI
 * watchdog may also be driven from the I/O APIC timer.
 */
static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG  (1<<0)
#define LAPIC_NMI_RESERVED  (1<<1)
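
/*
 * Illustrative only: a sketch of how an external user of the performance
 * counters (an oprofile-style driver, for instance) might claim and return
 * the lapic NMI through the reserve/release helpers defined later in this
 * file. Everything except reserve_lapic_nmi() and release_lapic_nmi() is
 * hypothetical.
 *
 *      static int my_profiler_start(void)
 *      {
 *              if (reserve_lapic_nmi() < 0)
 *                      return -EBUSY;          (already reserved elsewhere)
 *              ... program the performance counters ...
 *              return 0;
 *      }
 *
 *      static void my_profiler_stop(void)
 *      {
 *              ... stop the counters ...
 *              release_lapic_nmi();    (re-enables the watchdog if it owned the lapic)
 *      }
 */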

/* nmi_active:
 * +1: the lapic NMI watchdog is active, but can be disabled
 *  0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 * -1: the lapic NMI watchdog is disabled, but can be enabled
 */
int nmi_active;         /* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;
unsigned int nmi_perfctr_msr;   /* the MSR to reset in NMI handler */

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE       (1 << 22)
#define K7_EVNTSEL_INT          (1 << 20)
#define K7_EVNTSEL_OS           (1 << 17)
#define K7_EVNTSEL_USR          (1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING    0x76
#define K7_NMI_EVENT            K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

#define P6_EVNTSEL0_ENABLE      (1 << 22)
#define P6_EVNTSEL_INT          (1 << 20)
#define P6_EVNTSEL_OS           (1 << 17)
#define P6_EVNTSEL_USR          (1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED          0x79
#define P6_NMI_EVENT            P6_EVENT_CPU_CLOCKS_NOT_HALTED

/* Run after command line and cpu_init init, but before all other checks */
void __init nmi_watchdog_default(void)
{
        if (nmi_watchdog != NMI_DEFAULT)
                return;

        /* For some reason the IO APIC watchdog doesn't work on the AMD
           8111 chipset. For now switch to local APIC mode using
           perfctr0 there. On Intel CPUs we don't have code to handle
           the perfctr and the IO-APIC seems to work, so use that. */

        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
                nmi_watchdog = NMI_LOCAL_APIC;
                printk(KERN_INFO
                       "Using local APIC NMI watchdog using perfctr0\n");
        } else {
                printk(KERN_INFO "Using IO APIC NMI watchdog\n");
                nmi_watchdog = NMI_IO_APIC;
        }
}

/* Why is there no CPUID flag for this? */
static __init int cpu_has_lapic(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
        case X86_VENDOR_AMD:
                return boot_cpu_data.x86 >= 6;
        /* .... add more cpus here or find a different way to figure this out. */
        default:
                return 0;
        }
}

static int __init check_nmi_watchdog(void)
{
        int counts[NR_CPUS];
        int cpu;

        if (nmi_watchdog == NMI_NONE)
                return 0;

        if (nmi_watchdog == NMI_LOCAL_APIC && !cpu_has_lapic()) {
                nmi_watchdog = NMI_NONE;
                return -1;
        }

        printk(KERN_INFO "Testing NMI watchdog ... ");

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                counts[cpu] = cpu_pda[cpu].__nmi_count;
        local_irq_enable();
        mdelay((10*1000)/nmi_hz); /* wait 10 ticks */

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (cpu_pda[cpu].__nmi_count - counts[cpu] <= 5) {
                        printk("CPU#%d: NMI appears to be stuck (%d)!\n",
                               cpu,
                               cpu_pda[cpu].__nmi_count);
                        nmi_active = 0;
                        lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
                        return -1;
                }
        }
        printk("OK.\n");

        /* now that we know it works we can reduce NMI frequency to
           something more reasonable; makes a difference in some configs */
        if (nmi_watchdog == NMI_LOCAL_APIC)
                nmi_hz = 1;

        return 0;
}
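
/*
 * Timing note (derived from check_nmi_watchdog() above): the test busy-waits
 * (10*1000)/nmi_hz milliseconds, i.e. roughly ten expected NMI periods at the
 * current nmi_hz, and then requires each CPU's __nmi_count to have advanced
 * by more than 5 before the watchdog is declared working.
 */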
/* Have this called later during boot so counters are updating */
late_initcall(check_nmi_watchdog);

int __init setup_nmi_watchdog(char *str)
{
        int nmi;

        if (!strncmp(str, "panic", 5)) {
                panic_on_timeout = 1;
                str = strchr(str, ',');
                if (!str)
                        return 1;
                ++str;
        }

        get_option(&str, &nmi);

        if (nmi >= NMI_INVALID)
                return 0;
        nmi_watchdog = nmi;
        return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
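
/*
 * Usage note (sketch): the handler above implements the boot parameter
 * "nmi_watchdog=[panic,]<num>", e.g. "nmi_watchdog=panic,1" or
 * "nmi_watchdog=2". The leading "panic" sets panic_on_timeout, which is
 * consumed outside this file so that a watchdog-detected lockup panics the
 * machine. The number selects the mode, assuming the conventional
 * NMI_IO_APIC/NMI_LOCAL_APIC numbering from <asm/nmi.h>; this function only
 * checks that the value is below NMI_INVALID.
 */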

static void disable_lapic_nmi_watchdog(void)
{
        if (nmi_active <= 0)
                return;
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                wrmsr(MSR_K7_EVNTSEL0, 0, 0);
                break;
        case X86_VENDOR_INTEL:
                wrmsr(MSR_IA32_EVNTSEL0, 0, 0);
                break;
        }
        nmi_active = -1;
        /* tell do_nmi() and others that we're not active any more */
        nmi_watchdog = 0;
}

static void enable_lapic_nmi_watchdog(void)
{
        if (nmi_active < 0) {
                nmi_watchdog = NMI_LOCAL_APIC;
                setup_apic_nmi_watchdog();
        }
}

int reserve_lapic_nmi(void)
{
        unsigned int old_owner;

        spin_lock(&lapic_nmi_owner_lock);
        old_owner = lapic_nmi_owner;
        lapic_nmi_owner |= LAPIC_NMI_RESERVED;
        spin_unlock(&lapic_nmi_owner_lock);
        if (old_owner & LAPIC_NMI_RESERVED)
                return -EBUSY;
        if (old_owner & LAPIC_NMI_WATCHDOG)
                disable_lapic_nmi_watchdog();
        return 0;
}

void release_lapic_nmi(void)
{
        unsigned int new_owner;

        spin_lock(&lapic_nmi_owner_lock);
        new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
        lapic_nmi_owner = new_owner;
        spin_unlock(&lapic_nmi_owner_lock);
        if (new_owner & LAPIC_NMI_WATCHDOG)
                enable_lapic_nmi_watchdog();
}

void disable_timer_nmi_watchdog(void)
{
        if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
                return;

        disable_irq(0);
        unset_nmi_callback();
        nmi_active = -1;
        nmi_watchdog = NMI_NONE;
}

void enable_timer_nmi_watchdog(void)
{
        if (nmi_active < 0) {
                nmi_watchdog = NMI_IO_APIC;
                touch_nmi_watchdog();
                nmi_active = 1;
                enable_irq(0);
        }
}
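
/*
 * Illustrative only: disable_timer_nmi_watchdog() and
 * enable_timer_nmi_watchdog() are meant to be used as a pair. The disable
 * path masks IRQ 0, resets the NMI callback and marks the watchdog inactive;
 * the enable path re-enables IRQ 0 and marks it active again. A hedged
 * sketch with a hypothetical caller:
 *
 *      disable_timer_nmi_watchdog();
 *      ... section during which the timer-driven NMI must stay quiet ...
 *      enable_timer_nmi_watchdog();
 */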

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        nmi_pm_active = nmi_active;
        disable_lapic_nmi_watchdog();
        return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
        if (nmi_pm_active > 0)
                enable_lapic_nmi_watchdog();
        return 0;
}

static struct sysdev_class nmi_sysclass = {
        set_kset_name("lapic_nmi"),
        .resume         = lapic_nmi_resume,
        .suspend        = lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
        .id     = 0,
        .cls    = &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
        int error;

        if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
                return 0;

        error = sysdev_class_register(&nmi_sysclass);
        if (!error)
                error = sysdev_register(&device_lapic_nmi);
        return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif /* CONFIG_PM */

/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

static void setup_k7_watchdog(void)
{
        int i;
        unsigned int evntsel;

        /* No check, so can start with slow frequency */
        nmi_hz = 1;

        /* XXX should check these in EFER */

        nmi_perfctr_msr = MSR_K7_PERFCTR0;

        for (i = 0; i < 4; ++i) {
                /* Simulator may not support it */
                if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL))
                        return;
                wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
        }

        evntsel = K7_EVNTSEL_INT
                | K7_EVNTSEL_OS
                | K7_EVNTSEL_USR
                | K7_NMI_EVENT;

        wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
        wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz*1000) / nmi_hz);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= K7_EVNTSEL_ENABLE;
        wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}
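
/*
 * Worked example (derived from setup_k7_watchdog() above; the CPU speed is
 * illustrative): the counter is loaded with -((u64)cpu_khz*1000)/nmi_hz and
 * counts unhalted CPU cycles, so it overflows after roughly
 * cpu_khz*1000/nmi_hz cycles, at which point the LVTPC entry (programmed to
 * APIC_DM_NMI) delivers an NMI. With nmi_hz forced to 1 here and a
 * hypothetical 2.2 GHz CPU (cpu_khz == 2200000) that is about 2.2e9 cycles,
 * i.e. about one NMI per second of fully busy execution; since the event
 * does not count while the CPU halts, an idle CPU sees NMIs less often in
 * wall-clock time.
 */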

void setup_apic_nmi_watchdog(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                if (boot_cpu_data.x86 != 15)
                        return;
                if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
                        return;
                setup_k7_watchdog();
                break;
        default:
                return;
        }
        lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
        nmi_active = 1;
}

/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * Since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first ...
 * [if more tty-related locks are added, break them up here too!]
 */

static unsigned int
        last_irq_sums[NR_CPUS],
        alert_counter[NR_CPUS];

void touch_nmi_watchdog(void)
{
        int i;

        /*
         * Just reset the alert counters (other CPUs might be
         * spinning on locks we hold):
         */
        for (i = 0; i < NR_CPUS; i++)
                alert_counter[i] = 0;
}
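
/*
 * Illustrative only: code that legitimately keeps a CPU busy for a long
 * time (possibly while holding locks that other CPUs spin on) should call
 * touch_nmi_watchdog() periodically so that nmi_watchdog_tick() below does
 * not report a lockup. A hedged sketch; my_slow_poll() is hypothetical:
 *
 *      while (!my_slow_poll()) {
 *              touch_nmi_watchdog();
 *              cpu_relax();
 *      }
 */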

void nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
        int sum, cpu;

        cpu = safe_smp_processor_id();
        sum = read_pda(apic_timer_irqs);
        if (last_irq_sums[cpu] == sum) {
                /*
                 * Ayiee, looks like this CPU is stuck ...
                 * wait a few IRQs (5 seconds) before doing the oops ...
                 */
                alert_counter[cpu]++;
                if (alert_counter[cpu] == 5*nmi_hz) {
                        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
                            == NOTIFY_STOP) {
                                alert_counter[cpu] = 0;
                                return;
                        }
                        die_nmi("NMI Watchdog detected LOCKUP on CPU%d", regs);
                }
        } else {
                last_irq_sums[cpu] = sum;
                alert_counter[cpu] = 0;
        }
        if (nmi_perfctr_msr)
                wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
}
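
/*
 * Timing note (derived from nmi_watchdog_tick() above): the lockup threshold
 * is 5*nmi_hz consecutive NMIs during which apic_timer_irqs has not moved.
 * Since the watchdog NMIs arrive roughly nmi_hz times per second, that
 * corresponds to about five seconds of missed local APIC timer interrupts
 * regardless of which watchdog mode is in use.
 */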

static int dummy_nmi_callback(struct pt_regs *regs, int cpu)
{
        return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

asmlinkage void do_nmi(struct pt_regs *regs, long error_code)
{
        int cpu = safe_smp_processor_id();

        nmi_enter();
        add_pda(__nmi_count, 1);
        if (!nmi_callback(regs, cpu))
                default_do_nmi(regs);
        nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
        nmi_callback = callback;
}

void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}
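
/*
 * Illustrative only: how a secondary NMI consumer could hook into do_nmi()
 * through the callback interface above (oprofile uses this mechanism in a
 * similar way). The handler name and its internals are hypothetical; a
 * nonzero return tells do_nmi() the NMI was handled, so default_do_nmi()
 * is skipped.
 *
 *      static int my_nmi_handler(struct pt_regs *regs, int cpu)
 *      {
 *              if (... the event is not ours ...)
 *                      return 0;               (fall through to default_do_nmi)
 *              ... handle it, re-arm the counter ...
 *              return 1;
 *      }
 *
 *      set_nmi_callback(my_nmi_handler);
 *      ...
 *      unset_nmi_callback();
 */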

#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
        unsigned char reason = get_nmi_reason();
        char buf[64];

        if (!(reason & 0xc0)) {
                sprintf(buf, "NMI received for unknown reason %02x\n", reason);
                die_nmi(buf, regs);
        }
        return 0;
}

/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
                           void __user *buffer, size_t *length, loff_t *ppos)
{
        int old_state;

        old_state = unknown_nmi_panic;
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (!!old_state == !!unknown_nmi_panic)
                return 0;

        if (unknown_nmi_panic) {
                if (reserve_lapic_nmi() < 0) {
                        unknown_nmi_panic = 0;
                        return -EBUSY;
                } else {
                        set_nmi_callback(unknown_nmi_panic_callback);
                }
        } else {
                release_lapic_nmi();
                unset_nmi_callback();
        }
        return 0;
}
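
/*
 * Usage note (sketch): with this handler wired up to the
 * kernel.unknown_nmi_panic sysctl, the feature can be toggled at run time,
 * e.g. "echo 1 > /proc/sys/kernel/unknown_nmi_panic". Enabling it reserves
 * the lapic NMI (so it cannot coexist with the lapic NMI watchdog or another
 * reservation holder), and any NMI whose reason byte has neither of the two
 * hardware error bits set (reason & 0xc0 == 0) is then reported through
 * die_nmi().
 */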

#endif

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);