/*
 * Machine check handler.
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/kdebug.h>
#include <asm/uaccess.h>

#define MISC_MCELOG_MINOR 227
#define NR_BANKS 5
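
/*
 * Note on register layout (architectural, per the x86 MCA spec): each
 * error bank i owns four consecutive MSRs starting at MSR_IA32_MC0_CTL,
 * i.e. MCi_CTL, MCi_STATUS, MCi_ADDR, MCi_MISC. That is why the code
 * below indexes bank MSRs with a stride of 4 (MSR_IA32_MC0_STATUS + i*4).
 */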

static int mce_dont_init;

/* 0: always panic, 1: panic if deadlock possible, 2: try to avoid panic,
   3: never panic or exit (for testing only) */
static int tolerant = 1;
static int banks;
static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
static unsigned long console_logged;
static int notify_user;
static int rip_msr;

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */

struct mce_log mcelog = {
	MCE_LOG_SIGNATURE,
	MCE_LOG_LEN,
};

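/*
 * Publication protocol (a sketch of what mce_log() below relies on):
 * a slot is reserved by advancing mcelog.next with cmpxchg, the record
 * is copied in, and only then is ->finished set behind an smp_wmb(),
 * so a consumer that observes ->finished != 0 can safely read the rest
 * of the entry.
 */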
void mce_log(struct mce *mce)
{
	unsigned next, entry;
	mce->finished = 0;
	smp_wmb();
	for (;;) {
		entry = rcu_dereference(mcelog.next);
		/* When the buffer fills up discard new entries. Assume
		   that the earlier errors are the more interesting. */
		if (entry >= MCE_LOG_LEN) {
			set_bit(MCE_OVERFLOW, &mcelog.flags);
			return;
		}
		/* Old left over entry. Skip. */
		if (mcelog.entry[entry].finished)
			continue;
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	smp_wmb();
	mcelog.entry[entry].finished = 1;
	smp_wmb();

	if (!test_and_set_bit(0, &console_logged))
		notify_user = 1;
}

static void print_mce(struct mce *m)
{
	printk(KERN_EMERG "\n"
	       KERN_EMERG
	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->cpu, m->mcgstatus, m->bank, m->status);
	if (m->rip) {
		printk(KERN_EMERG
		       "RIP%s %02x:<%016Lx> ",
		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
		       m->cs, m->rip);
		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->rip);
		printk("\n");
	}
	printk(KERN_EMERG "TSC %Lx ", m->tsc);
	if (m->addr)
		printk("ADDR %Lx ", m->addr);
	if (m->misc)
		printk("MISC %Lx ", m->misc);
	printk("\n");
}
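
/*
 * For reference, the format strings above produce console output roughly
 * like this (all values are illustrative only):
 *
 *	CPU 0: Machine Check Exception:                4 Bank 4: b200000000070f0f
 *	RIP 10:<ffffffff8010ff31> {default_idle+0x31/0x50}
 *	TSC 1b2e89ea3e4a
 */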

static void mce_panic(char *msg, struct mce *backup, unsigned long start)
{
	int i;
	oops_begin();
	for (i = 0; i < MCE_LOG_LEN; i++) {
		unsigned long tsc = mcelog.entry[i].tsc;
		if (time_before(tsc, start))
			continue;
		print_mce(&mcelog.entry[i]);
		if (backup && mcelog.entry[i].tsc == backup->tsc)
			backup = NULL;
	}
	if (backup)
		print_mce(backup);
	if (tolerant >= 3)
		printk("Fake panic: %s\n", msg);
	else
		panic(msg);
}

static int mce_available(struct cpuinfo_x86 *c)
{
	return test_bit(X86_FEATURE_MCE, &c->x86_capability) &&
	       test_bit(X86_FEATURE_MCA, &c->x86_capability);
}

static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
		m->rip = regs->rip;
		m->cs = regs->cs;
	} else {
		m->rip = 0;
		m->cs = 0;
	}
	if (rip_msr) {
		/* Assume the RIP in the MSR is exact. Is this true? */
		m->mcgstatus |= MCG_STATUS_EIPV;
		rdmsrl(rip_msr, m->rip);
		m->cs = 0;
	}
}

/*
 * The actual machine check handler
 */

void do_machine_check(struct pt_regs * regs, long error_code)
{
	struct mce m, panicm;
	int nowayout = (tolerant < 1);
	int kill_it = 0;
	u64 mcestart = 0;
	int i;
	int panicm_found = 0;

	if (regs)
		notify_die(DIE_NMI, "machine check", regs, error_code, 255, SIGKILL);
	if (!banks)
		return;

	memset(&m, 0, sizeof(struct mce));
	m.cpu = hard_smp_processor_id();
	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	rdtscll(mcestart);
	barrier();

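	/*
	 * Each MCi_STATUS read in the loop below is decoded via the
	 * architectural status bits: VAL (entry valid), EN (reporting was
	 * enabled), UC (uncorrected), PCC (processor context corrupt),
	 * and MISCV/ADDRV flagging that MCi_MISC/MCi_ADDR hold extra data.
	 */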
	for (i = 0; i < banks; i++) {
		if (!bank[i])
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		if (m.status & MCI_STATUS_EN) {
			/* In theory _OVER could be a nowayout too, but
			   assume any overflowed errors were not fatal. */
			nowayout |= !!(m.status & MCI_STATUS_PCC);
			kill_it |= !!(m.status & MCI_STATUS_UC);
		}

		if (m.status & MCI_STATUS_MISCV)
			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
		if (m.status & MCI_STATUS_ADDRV)
			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

		mce_get_rip(&m, regs);
		if (error_code != -1)
			rdtscll(m.tsc);
		wrmsrl(MSR_IA32_MC0_STATUS + i*4, 0);
		mce_log(&m);

		/* Did this bank cause the exception? */
		/* Assume that the bank with uncorrectable errors did it,
		   and that there is only a single one. */
		if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
			panicm = m;
			panicm_found = 1;
		}

		tainted |= TAINT_MACHINE_CHECK;
	}

	/* Never do anything final in the polling timer */
	if (!regs)
		goto out;

	/* If we didn't find an uncorrectable error, pick
	   the last one (shouldn't happen, just being safe). */
	if (!panicm_found)
		panicm = m;
	if (nowayout)
		mce_panic("Machine check", &panicm, mcestart);
	if (kill_it) {
		int user_space = 0;

		if (m.mcgstatus & MCG_STATUS_RIPV)
			user_space = panicm.rip && (panicm.cs & 3);

		/* When the machine was in user space and the CPU didn't get
		   confused it's normally not necessary to panic, unless you
		   are paranoid (tolerant == 0)

		   RED-PEN could be more tolerant for MCEs in idle,
		   but most likely they occur at boot anyways, where
		   it is best to just halt the machine. */
		if ((!user_space && (panic_on_oops || tolerant < 2)) ||
		    (unsigned)current->pid <= 1)
			mce_panic("Uncorrected machine check", &panicm, mcestart);

		/* do_exit takes an awful lot of locks and has a
		   slight risk of deadlocking. If you don't want that
		   don't set tolerant >= 2 */
		if (tolerant < 3)
			do_exit(SIGBUS);
	}

 out:
	/* Last thing done in the machine check exception to clear state. */
	wrmsrl(MSR_IA32_MCG_STATUS, 0);
}

/*
 * Periodic polling timer for "silent" machine check errors.
 */

static int check_interval = 5 * 60; /* 5 minutes */
static void mcheck_timer(void *data);
static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);

static void mcheck_check_cpu(void *info)
{
	if (mce_available(&current_cpu_data))
		do_machine_check(NULL, 0);
}

static void mcheck_timer(void *data)
{
	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
	schedule_delayed_work(&mcheck_work, check_interval * HZ);

	/*
	 * It's ok to read stale data here for notify_user and
	 * console_logged as we'll simply get the updated versions
	 * on the next mcheck_timer execution and atomic operations
	 * on console_logged act as synchronization for notify_user
	 * writes.
	 */
	if (notify_user && console_logged) {
		notify_user = 0;
		clear_bit(0, &console_logged);
		printk(KERN_INFO "Machine check events logged\n");
	}
}

static __init int periodic_mcheck_init(void)
{
	if (check_interval)
		schedule_delayed_work(&mcheck_work, check_interval*HZ);
	return 0;
}
__initcall(periodic_mcheck_init);
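
/*
 * Note: the polling path above enters do_machine_check() with
 * regs == NULL, so the "never do anything final in the polling timer"
 * check makes it log and clear valid banks without ever panicking or
 * killing a task.
 */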

/*
 * Initialize Machine Checks for a CPU.
 */
static void mce_init(void *dummy)
{
	u64 cap;
	int i;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	banks = cap & 0xff;
	if (banks > NR_BANKS) {
		printk(KERN_INFO "MCE: warning: using only %d banks\n", banks);
		banks = NR_BANKS;
	}
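
	/*
	 * MCG_CAP decoding used here (architectural layout): bits 7:0 are
	 * the bank count, bit 8 is MCG_CTL_P, bit 9 is MCG_EXT_P (extended
	 * machine check registers present) and bits 23:16 the extended
	 * register count; at least 9 such registers are taken to mean the
	 * saved RIP (MCG_EIP) is among them.
	 */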
	/* Use accurate RIP reporting if available. */
	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	/* Log the machine checks left over from the previous reset.
	   This also clears all registers */
	do_machine_check(NULL, -1);

	set_in_cr4(X86_CR4_MCE);

	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
}

/* Add per CPU specific workarounds here */
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) {
		/* disable GART TBL walk error reporting, which trips off
		   incorrectly with the IOMMU & 3ware & Cerberus. */
		clear_bit(10, &bank[4]);
	}
}

static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	default:
		break;
	}
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off.
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
	static cpumask_t mce_cpus __initdata = CPU_MASK_NONE;

	mce_cpu_quirks(c);

	if (mce_dont_init ||
	    cpu_test_and_set(smp_processor_id(), mce_cpus) ||
	    !mce_available(c))
		return;

	mce_init(NULL);
	mce_cpu_features(c);
}

/*
 * Character device to read and clear the MCE log.
 */

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;
	rdtscll(cpu_tsc[smp_processor_id()]);
}

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off)
{
	unsigned long *cpu_tsc;
	static DECLARE_MUTEX(mce_read_sem);
	unsigned next;
	char __user *buf = ubuf;
	int i, err;

	cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	down(&mce_read_sem);
	next = rcu_dereference(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		up(&mce_read_sem);
		kfree(cpu_tsc);
		return -EINVAL;
	}

	err = 0;
	for (i = 0; i < next; i++) {
		if (!mcelog.entry[i].finished)
			continue;
		smp_rmb();
		err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
		buf += sizeof(struct mce);
	}

	memset(mcelog.entry, 0, next * sizeof(struct mce));
	mcelog.next = 0;

	synchronize_sched();

	/* Collect entries that were still getting written before the synchronize. */

	on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce));
			smp_rmb();
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	up(&mce_read_sem);
	kfree(cpu_tsc);
	return err ? -EFAULT : buf - ubuf;
}
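
/*
 * Usage sketch (illustrative, not part of this file): a userspace reader
 * must request the whole buffer at once, since mce_read() rejects partial
 * reads with -EINVAL. Assuming <asm/mce.h> exports struct mce and
 * MCE_LOG_LEN:
 *
 *	struct mce records[MCE_LOG_LEN];
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	ssize_t n = read(fd, records, sizeof(records));
 *	for (i = 0; i * sizeof(struct mce) < n; i++)
 *		printf("bank %d status %llx\n",
 *		       records[i].bank, records[i].status);
 *	close(fd);
 */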

static int mce_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;
		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);
		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

static struct file_operations mce_chrdev_ops = {
	.read = mce_read,
	.ioctl = mce_ioctl,
};

static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

/*
 * Old style boot options parsing. Only for compatibility.
 */

static int __init mcheck_disable(char *str)
{
	mce_dont_init = 1;
	return 0;
}

/* mce=off disables machine check. Note you can reenable it later
   using sysfs */
static int __init mcheck_enable(char *str)
{
	if (!strcmp(str, "off"))
		mce_dont_init = 1;
	else
		printk("mce= argument %s ignored. Please use /sys", str);
	return 0;
}

__setup("nomce", mcheck_disable);
__setup("mce", mcheck_enable);

/*
 * Sysfs support
 */

/* On resume clear all MCE state. Don't want to see leftovers from the BIOS. */
static int mce_resume(struct sys_device *dev)
{
	on_each_cpu(mce_init, NULL, 1, 1);
	return 0;
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	if (check_interval)
		cancel_delayed_work(&mcheck_work);
	/* Timer race is harmless here */
	on_each_cpu(mce_init, NULL, 1, 1);
	if (check_interval)
		schedule_delayed_work(&mcheck_work, check_interval*HZ);
}

static struct sysdev_class mce_sysclass = {
	.resume = mce_resume,
	set_kset_name("machinecheck"),
};

static DEFINE_PER_CPU(struct sys_device, device_mce);

/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
	static ssize_t show_ ## name(struct sys_device *s, char *buf) {		\
		return sprintf(buf, "%lx\n", (unsigned long)var);		\
	}									\
	static ssize_t set_ ## name(struct sys_device *s, const char *buf, size_t siz) { \
		char *end;							\
		unsigned long new = simple_strtoul(buf, &end, 0);		\
		if (end == buf) return -EINVAL;					\
		var = new;							\
		start;								\
		return end-buf;							\
	}									\
	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);

ACCESSOR(bank0ctl,bank[0],mce_restart())
ACCESSOR(bank1ctl,bank[1],mce_restart())
ACCESSOR(bank2ctl,bank[2],mce_restart())
ACCESSOR(bank3ctl,bank[3],mce_restart())
ACCESSOR(bank4ctl,bank[4],mce_restart())
ACCESSOR(tolerant,tolerant,)
ACCESSOR(check_interval,check_interval,mce_restart())

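/*
 * With the sysdev class named "machinecheck" and per-CPU ids assigned in
 * mce_create_device() below, these attributes should surface as, e.g.
 * (paths assumed from the sysdev conventions of this kernel):
 *
 *	/sys/devices/system/machinecheck/machinecheck0/bank4ctl
 *	/sys/devices/system/machinecheck/machinecheck0/tolerant
 *	/sys/devices/system/machinecheck/machinecheck0/check_interval
 *
 * Writing a value, e.g. "echo 0 > .../bank4ctl", stores it and triggers
 * mce_restart() where the ACCESSOR was passed one.
 */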
/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	int err;
	if (!mce_available(&cpu_data[cpu]))
		return -EIO;

	per_cpu(device_mce,cpu).id = cpu;
	per_cpu(device_mce,cpu).cls = &mce_sysclass;

	err = sysdev_register(&per_cpu(device_mce,cpu));

	if (!err) {
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank0ctl);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank1ctl);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank2ctl);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank3ctl);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank4ctl);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_tolerant);
		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_check_interval);
	}
	return err;
}

#ifdef CONFIG_HOTPLUG_CPU
static __cpuinit void mce_remove_device(unsigned int cpu)
{
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank0ctl);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank1ctl);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank2ctl);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank3ctl);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank4ctl);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_tolerant);
	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval);
	sysdev_unregister(&per_cpu(device_mce,cpu));
}
#endif

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static __cpuinit int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		mce_create_device(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		mce_remove_device(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier = {
	.notifier_call = mce_cpu_callback,
};

static __init int mce_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;
	err = sysdev_class_register(&mce_sysclass);

	for_each_online_cpu(i) {
		mce_create_device(i);
	}

	register_cpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);
	return err;
}

device_initcall(mce_init_device);