[PATCH] x86-64: Make lockless machine check record passing a bit more robust.
arch/x86_64/kernel/mce.c

/*
 * Machine check handler.
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/kdebug.h>
#include <asm/uaccess.h>

#define MISC_MCELOG_MINOR 227
#define NR_BANKS 5

static int mce_dont_init;

/* 0: always panic, 1: panic if deadlock possible, 2: try to avoid panic,
   3: never panic or exit (for testing only) */
static int tolerant = 1;
static int banks;
static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
static unsigned long console_logged;
static int notify_user;
static int rip_msr;
static int mce_bootlog;

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. It
 * also keeps MCEs separate from kernel messages to avoid bogus bug reports.
 */

struct mce_log mcelog = {
	MCE_LOG_SIGNATURE,
	MCE_LOG_LEN,
};

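/*
 * Sketch of the handshake used below: a writer claims a slot by
 * advancing mcelog.next with cmpxchg(), copies its record in, and only
 * then sets ->finished as the publish step.  ->finished is therefore
 * the only field a reader may use to decide whether a slot holds a
 * complete record.
 */
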
void mce_log(struct mce *mce)
{
	unsigned next, entry;
	mce->finished = 0;
	smp_wmb();
	for (;;) {
		entry = rcu_dereference(mcelog.next);
		for (;;) {
			/* When the buffer fills up discard new entries. Assume
			   that the earlier errors are the more interesting. */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW, &mcelog.flags);
				return;
			}
			/* Old left over entry. Skip. */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			/* Found a free slot; leave the scan loop. */
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	smp_wmb();
	mcelog.entry[entry].finished = 1;
	smp_wmb();

	if (!test_and_set_bit(0, &console_logged))
		notify_user = 1;
}

static void print_mce(struct mce *m)
{
	printk(KERN_EMERG "\n"
	       KERN_EMERG
	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->cpu, m->mcgstatus, m->bank, m->status);
	if (m->rip) {
		printk(KERN_EMERG
		       "RIP%s %02x:<%016Lx> ",
		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
		       m->cs, m->rip);
		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->rip);
		printk("\n");
	}
	printk(KERN_EMERG "TSC %Lx ", m->tsc);
	if (m->addr)
		printk("ADDR %Lx ", m->addr);
	if (m->misc)
		printk("MISC %Lx ", m->misc);
	printk("\n");
}

static void mce_panic(char *msg, struct mce *backup, unsigned long start)
{
	int i;
	oops_begin();
	for (i = 0; i < MCE_LOG_LEN; i++) {
		unsigned long tsc = mcelog.entry[i].tsc;
		if (time_before(tsc, start))
			continue;
		print_mce(&mcelog.entry[i]);
		if (backup && mcelog.entry[i].tsc == backup->tsc)
			backup = NULL;
	}
	if (backup)
		print_mce(backup);
	if (tolerant >= 3)
		printk("Fake panic: %s\n", msg);
	else
		panic(msg);
}

static int mce_available(struct cpuinfo_x86 *c)
{
	return test_bit(X86_FEATURE_MCE, &c->x86_capability) &&
	       test_bit(X86_FEATURE_MCA, &c->x86_capability);
}

static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
		m->rip = regs->rip;
		m->cs = regs->cs;
	} else {
		m->rip = 0;
		m->cs = 0;
	}
	if (rip_msr) {
		/* Assume the RIP in the MSR is exact. Is this true? */
		m->mcgstatus |= MCG_STATUS_EIPV;
		rdmsrl(rip_msr, m->rip);
		m->cs = 0;
	}
}

/*
 * The actual machine check handler
 */

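/*
 * Convention for the error_code argument (see mce_init below): >= 0 is
 * a regular machine check or poll, -1 logs the machine checks left
 * over from before boot, and -2 clears those leftovers without logging
 * -- hence the error_code checks around rdtscll() and mce_log() in the
 * bank loop.
 */
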
void do_machine_check(struct pt_regs * regs, long error_code)
{
	struct mce m, panicm;
	int nowayout = (tolerant < 1);
	int kill_it = 0;
	u64 mcestart = 0;
	int i;
	int panicm_found = 0;

	if (regs)
		notify_die(DIE_NMI, "machine check", regs, error_code, 255, SIGKILL);
	if (!banks)
		return;

	memset(&m, 0, sizeof(struct mce));
	m.cpu = hard_smp_processor_id();
	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	rdtscll(mcestart);
	barrier();

	for (i = 0; i < banks; i++) {
		if (!bank[i])
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		if (m.status & MCI_STATUS_EN) {
			/* In theory _OVER could be a nowayout too, but
			   assume any overflowed errors were not fatal. */
			nowayout |= !!(m.status & MCI_STATUS_PCC);
			kill_it |= !!(m.status & MCI_STATUS_UC);
		}

		if (m.status & MCI_STATUS_MISCV)
			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
		if (m.status & MCI_STATUS_ADDRV)
			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

		mce_get_rip(&m, regs);
		if (error_code >= 0)
			rdtscll(m.tsc);
		wrmsrl(MSR_IA32_MC0_STATUS + i*4, 0);
		if (error_code != -2)
			mce_log(&m);

		/* Did this bank cause the exception? */
		/* Assume that the bank with uncorrectable errors did it,
		   and that there is only a single one. */
		if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
			panicm = m;
			panicm_found = 1;
		}

		tainted |= TAINT_MACHINE_CHECK;
	}

	/* Never do anything final in the polling timer */
	if (!regs)
		goto out;

	/* If we didn't find an uncorrectable error, pick
	   the last one (shouldn't happen, just being safe). */
	if (!panicm_found)
		panicm = m;
	if (nowayout)
		mce_panic("Machine check", &panicm, mcestart);
	if (kill_it) {
		int user_space = 0;

		if (m.mcgstatus & MCG_STATUS_RIPV)
			user_space = panicm.rip && (panicm.cs & 3);

		/* When the machine was in user space and the CPU didn't get
		   confused it's normally not necessary to panic, unless you
		   are paranoid (tolerant == 0)

		   RED-PEN could be more tolerant for MCEs in idle,
		   but most likely they occur at boot anyways, where
		   it is best to just halt the machine. */
		if ((!user_space && (panic_on_oops || tolerant < 2)) ||
		    (unsigned)current->pid <= 1)
			mce_panic("Uncorrected machine check", &panicm, mcestart);

		/* do_exit takes an awful lot of locks and has a
		   slight risk of deadlocking. If you don't want that
		   don't set tolerant >= 2 */
		if (tolerant < 3)
			do_exit(SIGBUS);
	}

 out:
	/* Last thing done in the machine check exception to clear state. */
	wrmsrl(MSR_IA32_MCG_STATUS, 0);
}

/*
 * Periodic polling timer for "silent" machine check errors.
 */

static int check_interval = 5 * 60; /* 5 minutes */
static void mcheck_timer(void *data);
static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);

static void mcheck_check_cpu(void *info)
{
	if (mce_available(&current_cpu_data))
		do_machine_check(NULL, 0);
}

static void mcheck_timer(void *data)
{
	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
	schedule_delayed_work(&mcheck_work, check_interval * HZ);

	/*
	 * It's ok to read stale data here for notify_user and
	 * console_logged as we'll simply get the updated versions
	 * on the next mcheck_timer execution and atomic operations
	 * on console_logged act as synchronization for notify_user
	 * writes.
	 */
	if (notify_user && console_logged) {
		notify_user = 0;
		clear_bit(0, &console_logged);
		printk(KERN_INFO "Machine check events logged\n");
	}
}


static __init int periodic_mcheck_init(void)
{
	if (check_interval)
		schedule_delayed_work(&mcheck_work, check_interval*HZ);
	return 0;
}
__initcall(periodic_mcheck_init);


/*
 * Initialize Machine Checks for a CPU.
 */
static void mce_init(void *dummy)
{
	u64 cap;
	int i;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	banks = cap & 0xff;
	if (banks > NR_BANKS) {
		printk(KERN_INFO "MCE: warning: using only %d banks\n", banks);
		banks = NR_BANKS;
	}
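	/* In MCG_CAP, bit 9 is MCG_EXT_P (extended machine check state
	   present) and bits 23:16 give the extended register count; the
	   check below assumes MCG_EIP is implemented once at least nine
	   extended registers are reported. */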
	/* Use accurate RIP reporting if available. */
	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	/* Log the machine checks left over from the previous reset.
	   This also clears all registers */
	do_machine_check(NULL, mce_bootlog ? -1 : -2);

	set_in_cr4(X86_CR4_MCE);

	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
}

/* Add per CPU specific workarounds here */
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) {
		/* disable GART TBL walk error reporting, which trips off
		   incorrectly with the IOMMU & 3ware & Cerberus. */
		clear_bit(10, &bank[4]);
	}
}

static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	default:
		break;
	}
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off.
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
	static cpumask_t mce_cpus __initdata = CPU_MASK_NONE;

	mce_cpu_quirks(c);

	if (mce_dont_init ||
	    cpu_test_and_set(smp_processor_id(), mce_cpus) ||
	    !mce_available(c))
		return;

	mce_init(NULL);
	mce_cpu_features(c);
}

/*
 * Character device to read and clear the MCE log.
 */

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;
	rdtscll(cpu_tsc[smp_processor_id()]);
}

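/*
 * Reads drain the log in two passes: copy out and clear everything up
 * to mcelog.next, wait for in-flight writers with synchronize_sched(),
 * then use the per-CPU TSC snapshots gathered by collect_tscs() to
 * pick up only those late entries that were started before the drain.
 */
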
static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off)
{
	unsigned long *cpu_tsc;
	static DECLARE_MUTEX(mce_read_sem);
	unsigned next;
	char __user *buf = ubuf;
	int i, err;

	cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	down(&mce_read_sem);
	next = rcu_dereference(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		up(&mce_read_sem);
		kfree(cpu_tsc);
		return -EINVAL;
	}

	err = 0;
	for (i = 0; i < next; i++) {
		unsigned long start = jiffies;
		while (!mcelog.entry[i].finished) {
			if (!time_before(jiffies, start + 2)) {
				/* Writer seems stuck; discard the entry
				   instead of spinning on it forever. */
				memset(mcelog.entry + i, 0, sizeof(struct mce));
				goto timeout;
			}
			cpu_relax();
		}
		smp_rmb();
		err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
		buf += sizeof(struct mce);
 timeout:
		;
	}

	memset(mcelog.entry, 0, next * sizeof(struct mce));
	mcelog.next = 0;

	synchronize_sched();

	/* Collect entries that were still getting written before the synchronize. */
	on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce));
			smp_rmb();
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	up(&mce_read_sem);
	kfree(cpu_tsc);
	return err ? -EFAULT : buf - ubuf;
}

static int mce_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;
		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);
		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

static struct file_operations mce_chrdev_ops = {
	.read = mce_read,
	.ioctl = mce_ioctl,
};

static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

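/*
 * Illustrative consumer sketch (not part of this driver): an
 * mcelog-style user-space tool sizes its buffer from the ioctls above
 * and then issues one full-log read, which also clears the kernel-side
 * buffer:
 *
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	int rlen, llen;
 *	ioctl(fd, MCE_GET_RECORD_LEN, &rlen);
 *	ioctl(fd, MCE_GET_LOG_LEN, &llen);
 *	void *recs = malloc(rlen * llen);
 *	ssize_t n = read(fd, recs, rlen * llen);   -- n/rlen records
 *
 * Shorter reads fail with -EINVAL; see mce_read() above.
 */
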
/*
 * Old style boot options parsing. Only for compatibility.
 */

static int __init mcheck_disable(char *str)
{
	mce_dont_init = 1;
	return 0;
}

/* mce=off disables machine check. Note you can reenable it later
   using sysfs.
   mce=bootlog Log MCEs from before booting. Disabled by default to work
   around buggy BIOSes that leave bogus MCEs. */
static int __init mcheck_enable(char *str)
{
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		mce_dont_init = 1;
	else if (!strcmp(str, "bootlog"))
		mce_bootlog = 1;
	else
		printk("mce= argument %s ignored. Please use /sys", str);
	return 0;
}

__setup("nomce", mcheck_disable);
__setup("mce", mcheck_enable);

/*
 * Sysfs support
 */

/* On resume clear all MCE state. Don't want to see leftovers from the BIOS. */
static int mce_resume(struct sys_device *dev)
{
	on_each_cpu(mce_init, NULL, 1, 1);
	return 0;
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	if (check_interval)
		cancel_delayed_work(&mcheck_work);
	/* Timer race is harmless here */
	on_each_cpu(mce_init, NULL, 1, 1);
	if (check_interval)
		schedule_delayed_work(&mcheck_work, check_interval*HZ);
}

static struct sysdev_class mce_sysclass = {
	.resume = mce_resume,
	set_kset_name("machinecheck"),
};

static DEFINE_PER_CPU(struct sys_device, device_mce);

/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
	static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
		return sprintf(buf, "%lx\n", (unsigned long)var); \
	} \
	static ssize_t set_ ## name(struct sys_device *s, const char *buf, size_t siz) { \
		char *end; \
		unsigned long new = simple_strtoul(buf, &end, 0); \
		if (end == buf) return -EINVAL; \
		var = new; \
		start; \
		return end-buf; \
	} \
	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);

ACCESSOR(bank0ctl, bank[0], mce_restart())
ACCESSOR(bank1ctl, bank[1], mce_restart())
ACCESSOR(bank2ctl, bank[2], mce_restart())
ACCESSOR(bank3ctl, bank[3], mce_restart())
ACCESSOR(bank4ctl, bank[4], mce_restart())
ACCESSOR(tolerant, tolerant, )
ACCESSOR(check_interval, check_interval, mce_restart())

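/*
 * Each ACCESSOR() line above generates one 0644 sysfs attribute; once
 * mce_create_device() below registers them they appear as e.g.
 * /sys/devices/system/machinecheck/machinecheck0/bank0ctl.
 */
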
/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	int err;
	if (!mce_available(&cpu_data[cpu]))
		return -EIO;

	per_cpu(device_mce,cpu).id = cpu;
	per_cpu(device_mce,cpu).cls = &mce_sysclass;

	err = sysdev_register(&per_cpu(device_mce,cpu));

	if (!err) {
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank0ctl);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank1ctl);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank2ctl);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank3ctl);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank4ctl);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_tolerant);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_check_interval);
	}
	return err;
}

#ifdef CONFIG_HOTPLUG_CPU
static __cpuinit void mce_remove_device(unsigned int cpu)
{
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank0ctl);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank1ctl);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank2ctl);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank3ctl);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank4ctl);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_tolerant);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval);
	sysdev_unregister(&per_cpu(device_mce,cpu));
}
#endif

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static __cpuinit int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		mce_create_device(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		mce_remove_device(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier = {
	.notifier_call = mce_cpu_callback,
};

static __init int mce_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;
	err = sysdev_class_register(&mce_sysclass);

	for_each_online_cpu(i) {
		mce_create_device(i);
	}

	register_cpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);
	return err;
}

device_initcall(mce_init_device);