x86, mce: add __read_mostly
arch/x86/kernel/cpu/mcheck/mce.c
/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/ipi.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"
#include "mce.h"

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
        printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
               smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
                                                unexpected_machine_check;

int mce_disabled __read_mostly;

#ifdef CONFIG_X86_NEW_MCE

#define MISC_MCELOG_MINOR       227

#define SPINUNIT 100    /* 100ns */

atomic_t mce_entry;

DEFINE_PER_CPU(unsigned, mce_exception_count);

/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int              tolerant                __read_mostly = 1;
static int              banks                   __read_mostly;
static u64              *bank                   __read_mostly;
static int              rip_msr                 __read_mostly;
static int              mce_bootlog             __read_mostly = -1;
static int              monarch_timeout         __read_mostly = -1;
static int              mce_panic_timeout       __read_mostly;
static int              mce_dont_log_ce         __read_mostly;
int                     mce_cmci_disabled       __read_mostly;
int                     mce_ignore_ce           __read_mostly;
int                     mce_ser                 __read_mostly;
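/*
 * Note on __read_mostly (the point of this commit): these tunables are
 * written at boot or via sysfs but read on hot paths, so they are grouped
 * into the read-mostly data section. That keeps them off cache lines that
 * are frequently written and avoids false sharing.
 */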

static unsigned long            notify_user;
static char                     trigger[128];
static char                     *trigger_argv[2] = { trigger, NULL };

static unsigned long            dont_init_banks;

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
static DEFINE_PER_CPU(struct mce, mces_seen);
static int                      cpu_missing;

/* MCA banks polled by the periodic polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
        [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static inline int skip_bank_init(int i)
{
        return i < BITS_PER_LONG && test_bit(i, &dont_init_banks);
}

static DEFINE_PER_CPU(struct work_struct, mce_work);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
        memset(m, 0, sizeof(struct mce));
        m->cpu = m->extcpu = smp_processor_id();
        rdtscll(m->tsc);
        /* We hope get_seconds stays lockless */
        m->time = get_seconds();
        m->cpuvendor = boot_cpu_data.x86_vendor;
        m->cpuid = cpuid_eax(1);
#ifdef CONFIG_SMP
        m->socketid = cpu_data(m->extcpu).phys_proc_id;
#endif
        m->apicid = cpu_data(m->extcpu).initial_apicid;
        rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. It
 * also separates MCEs from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
        .signature      = MCE_LOG_SIGNATURE,
        .len            = MCE_LOG_LEN,
        .recordlen      = sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
        unsigned next, entry;

        mce->finished = 0;
        wmb();
        for (;;) {
                entry = rcu_dereference(mcelog.next);
                for (;;) {
                        /*
                         * When the buffer fills up discard new entries.
                         * Assume that the earlier errors are the more
                         * interesting ones:
                         */
                        if (entry >= MCE_LOG_LEN) {
                                set_bit(MCE_OVERFLOW,
                                        (unsigned long *)&mcelog.flags);
                                return;
                        }
                        /* Old left over entry. Skip: */
                        if (mcelog.entry[entry].finished) {
                                entry++;
                                continue;
                        }
                        break;
                }
                smp_rmb();
                next = entry + 1;
                if (cmpxchg(&mcelog.next, entry, next) == entry)
                        break;
        }
        memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
        wmb();
        mcelog.entry[entry].finished = 1;
        wmb();

        mce->finished = 1;
        set_bit(0, &notify_user);
}

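/*
 * Writer/reader handshake in mce_log(): a slot is claimed by advancing
 * mcelog.next with cmpxchg (safe against concurrent NMI-context writers),
 * the record is copied in, and only then is entry[].finished set, with
 * wmb() ordering each step. mce_read() below spins on .finished before
 * trusting a record, so a half-written entry is never handed to user space.
 */
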
static void print_mce(struct mce *m)
{
        printk(KERN_EMERG
               "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
               m->extcpu, m->mcgstatus, m->bank, m->status);
        if (m->ip) {
                printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
                       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
                       m->cs, m->ip);
                if (m->cs == __KERNEL_CS)
                        print_symbol("{%s}", m->ip);
                printk("\n");
        }
        printk(KERN_EMERG "TSC %llx ", m->tsc);
        if (m->addr)
                printk("ADDR %llx ", m->addr);
        if (m->misc)
                printk("MISC %llx ", m->misc);
        printk("\n");
        printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
               m->cpuvendor, m->cpuid, m->time, m->socketid,
               m->apicid);
}

static void print_mce_head(void)
{
        printk(KERN_EMERG "\n" KERN_EMERG "HARDWARE ERROR\n");
}

static void print_mce_tail(void)
{
        printk(KERN_EMERG "This is not a software problem!\n"
               KERN_EMERG "Run through mcelog --ascii to decode and contact your hardware vendor\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_paniced;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
        long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

        preempt_disable();
        local_irq_enable();
        while (timeout-- > 0)
                udelay(1);
        if (panic_timeout == 0)
                panic_timeout = mce_panic_timeout;
        panic("Panicking machine check CPU died");
}

static void mce_panic(char *msg, struct mce *final, char *exp)
{
        int i;

        /*
         * Make sure only one CPU runs in machine check panic
         */
        if (atomic_add_return(1, &mce_paniced) > 1)
                wait_for_panic();
        barrier();

        bust_spinlocks(1);
        console_verbose();
        print_mce_head();
        /* First print corrected ones that are still unlogged */
        for (i = 0; i < MCE_LOG_LEN; i++) {
                struct mce *m = &mcelog.entry[i];

                if (!(m->status & MCI_STATUS_VAL))
                        continue;
                if (!(m->status & MCI_STATUS_UC))
                        print_mce(m);
        }
        /* Now print uncorrected but with the final one last */
        for (i = 0; i < MCE_LOG_LEN; i++) {
                struct mce *m = &mcelog.entry[i];

                if (!(m->status & MCI_STATUS_VAL))
                        continue;
                if (!(m->status & MCI_STATUS_UC))
                        continue;
                if (!final || memcmp(m, final, sizeof(struct mce)))
                        print_mce(m);
        }
        if (final)
                print_mce(final);
        if (cpu_missing)
                printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n");
        print_mce_tail();
        if (exp)
                printk(KERN_EMERG "Machine check: %s\n", exp);
        if (panic_timeout == 0)
                panic_timeout = mce_panic_timeout;
        panic(msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
        unsigned bank = __get_cpu_var(injectm.bank);

        if (msr == rip_msr)
                return offsetof(struct mce, ip);
        if (msr == MSR_IA32_MC0_STATUS + bank*4)
                return offsetof(struct mce, status);
        if (msr == MSR_IA32_MC0_ADDR + bank*4)
                return offsetof(struct mce, addr);
        if (msr == MSR_IA32_MC0_MISC + bank*4)
                return offsetof(struct mce, misc);
        if (msr == MSR_IA32_MCG_STATUS)
                return offsetof(struct mce, mcgstatus);
        return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
        u64 v;

        if (__get_cpu_var(injectm).finished) {
                int offset = msr_to_offset(msr);

                if (offset < 0)
                        return 0;
                return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
        }
        rdmsrl(msr, v);
        return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
        if (__get_cpu_var(injectm).finished) {
                int offset = msr_to_offset(msr);

                if (offset >= 0)
                        *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
                return;
        }
        wrmsrl(msr, v);
}

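/*
 * How injection hooks in: the mce-inject module fills this CPU's injectm
 * with a fake record and sets injectm.finished; from then on the wrappers
 * above read and write the fields of that struct instead of the real MSRs,
 * so the normal handler paths can be exercised without hardware errors.
 */
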
/*
 * Simple lockless ring to communicate PFNs from the exception handler to the
 * process context work function. This is vastly simplified because there's
 * only a single reader and a single writer.
 */
#define MCE_RING_SIZE 16        /* we use one entry less */

struct mce_ring {
        unsigned short start;
        unsigned short end;
        unsigned long ring[MCE_RING_SIZE];
};
static DEFINE_PER_CPU(struct mce_ring, mce_ring);

/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
        struct mce_ring *r = &__get_cpu_var(mce_ring);

        return r->start == r->end;
}

static int mce_ring_get(unsigned long *pfn)
{
        struct mce_ring *r;
        int ret = 0;

        *pfn = 0;
        get_cpu();
        r = &__get_cpu_var(mce_ring);
        if (r->start == r->end)
                goto out;
        *pfn = r->ring[r->start];
        r->start = (r->start + 1) % MCE_RING_SIZE;
        ret = 1;
out:
        put_cpu();
        return ret;
}

/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
        struct mce_ring *r = &__get_cpu_var(mce_ring);
        unsigned next;

        next = (r->end + 1) % MCE_RING_SIZE;
        if (next == r->start)
                return -1;
        r->ring[r->end] = pfn;
        wmb();
        r->end = next;
        return 0;
}

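/*
 * The ring sacrifices one slot ("one entry less") so that full and empty
 * are distinguishable: empty is start == end, full is when advancing end
 * would collide with start. The wmb() in mce_ring_add() publishes the PFN
 * before the new end index, which is all the ordering a single-producer,
 * single-consumer ring needs.
 */
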
int mce_available(struct cpuinfo_x86 *c)
{
        if (mce_disabled)
                return 0;
        return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
        if (!mce_ring_empty()) {
                struct work_struct *work = &__get_cpu_var(mce_work);

                if (!work_pending(work))
                        schedule_work(work);
        }
}

/*
 * Get the address of the instruction at the time of the machine check
 * error.
 */
static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
        if (regs && (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV))) {
                m->ip = regs->ip;
                m->cs = regs->cs;
        } else {
                m->ip = 0;
                m->cs = 0;
        }
        if (rip_msr)
                m->ip = mce_rdmsrl(rip_msr);
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Called after interrupts have been reenabled again
 * when an MCE happened during an interrupts-off region
 * in the kernel.
 */
asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs)
{
        ack_APIC_irq();
        exit_idle();
        irq_enter();
        mce_notify_irq();
        mce_schedule_work();
        irq_exit();
}
#endif

static void mce_report_event(struct pt_regs *regs)
{
        if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
                mce_notify_irq();
                /*
                 * Triggering the work queue here is just an insurance
                 * policy in case the syscall exit notify handler
                 * doesn't run soon enough or ends up running on the
                 * wrong CPU (can happen when audit sleeps)
                 */
                mce_schedule_work();
                return;
        }

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Without APIC do not notify. The event will be picked
         * up eventually.
         */
        if (!cpu_has_apic)
                return;

        /*
         * When interrupts are disabled we cannot use
         * kernel services safely. Trigger a self interrupt
         * through the APIC to instead do the notification
         * after interrupts are reenabled again.
         */
        apic->send_IPI_self(MCE_SELF_VECTOR);

        /*
         * Wait for idle afterwards again so that we don't leave the
         * APIC in a non idle state because the normal APIC writes
         * cannot exclude us.
         */
        apic_wait_icr_idle();
#endif
}

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between exception handler
 * and poll handler -- so we skip this for now.
 * These cases should not happen anyways, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
        struct mce m;
        int i;

        __get_cpu_var(mce_poll_count)++;

        mce_setup(&m);

        m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
        for (i = 0; i < banks; i++) {
                if (!bank[i] || !test_bit(i, *b))
                        continue;

                m.misc = 0;
                m.addr = 0;
                m.bank = i;
                m.tsc = 0;

                barrier();
                m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4);
                if (!(m.status & MCI_STATUS_VAL))
                        continue;

                /*
                 * Uncorrected or signalled events are handled by the exception
                 * handler when it is enabled, so don't process those here.
                 *
                 * TBD do the same check for MCI_STATUS_EN here?
                 */
                if (!(flags & MCP_UC) &&
                    (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
                        continue;

                if (m.status & MCI_STATUS_MISCV)
                        m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4);
                if (m.status & MCI_STATUS_ADDRV)
                        m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4);

                if (!(flags & MCP_TIMESTAMP))
                        m.tsc = 0;
                /*
                 * Don't get the IP here because it's unlikely to
                 * have anything to do with the actual error location.
                 */
                if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) {
                        mce_log(&m);
                        add_taint(TAINT_MACHINE_CHECK);
                }

                /*
                 * Clear state for this bank.
                 */
                mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        }

        /*
         * Don't clear MCG_STATUS here because it's only defined for
         * exceptions.
         */

        sync_core();
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg)
{
        int i;

        for (i = 0; i < banks; i++) {
                m->status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4);
                if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
                        return 1;
        }
        return 0;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
        /*
         * The others already did panic for some reason.
         * Bail out like in a timeout.
         * rmb() to tell the compiler that system_state
         * might have been modified by someone else.
         */
        rmb();
        if (atomic_read(&mce_paniced))
                wait_for_panic();
        if (!monarch_timeout)
                goto out;
        if ((s64)*t < SPINUNIT) {
                /* CHECKME: Make panic default for 1 too? */
                if (tolerant < 1)
                        mce_panic("Timeout synchronizing machine check over CPUs",
                                  NULL, NULL);
                cpu_missing = 1;
                return 1;
        }
        *t -= SPINUNIT;
out:
        touch_nmi_watchdog();
        return 0;
}

/*
 * The Monarch's reign. The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure all CPUs' errors are always examined.
 *
 * This also detects the case of a machine check event coming from outer
 * space (not detected by any CPUs). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
        int cpu;
        struct mce *m = NULL;
        int global_worst = 0;
        char *msg = NULL;
        char *nmsg = NULL;

        /*
         * This CPU is the Monarch and the other CPUs have run
         * through their handlers.
         * Grade the severity of the errors of all the CPUs.
         */
        for_each_possible_cpu(cpu) {
                int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
                                            &nmsg);
                if (severity > global_worst) {
                        msg = nmsg;
                        global_worst = severity;
                        m = &per_cpu(mces_seen, cpu);
                }
        }

        /*
         * Cannot recover? Panic here then.
         * This dumps all the mces in the log buffer and stops the
         * other CPUs.
         */
        if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
                mce_panic("Fatal Machine check", m, msg);

        /*
         * For UC somewhere we let the CPU who detects it handle it.
         * We must also let the others continue, otherwise the handling
         * CPU could deadlock on a lock.
         */

        /*
         * No machine check event found. Must be some external
         * source or one CPU is hung. Panic.
         */
        if (!m && tolerant < 3)
                mce_panic("Machine check from unknown source", NULL, NULL);

        /*
         * Now clear all the mces_seen so that they don't reappear on
         * the next mce.
         */
        for_each_possible_cpu(cpu)
                memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
        int order;
        int cpus = num_online_cpus();
        u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

        if (!timeout)
                return -1;

        atomic_add(*no_way_out, &global_nwo);
        /*
         * global_nwo should be updated before mce_callin
         */
        smp_wmb();
        order = atomic_add_return(1, &mce_callin);

        /*
         * Wait for everyone.
         */
        while (atomic_read(&mce_callin) != cpus) {
                if (mce_timed_out(&timeout)) {
                        atomic_set(&global_nwo, 0);
                        return -1;
                }
                ndelay(SPINUNIT);
        }

        /*
         * mce_callin should be read before global_nwo
         */
        smp_rmb();

        if (order == 1) {
                /*
                 * Monarch: Starts executing now, the others wait.
                 */
                atomic_set(&mce_executing, 1);
        } else {
                /*
                 * Subject: Now start the scanning loop one by one in
                 * the original callin order.
                 * This way when there are any shared banks it will be
                 * only seen by one CPU before cleared, avoiding duplicates.
                 */
                while (atomic_read(&mce_executing) < order) {
                        if (mce_timed_out(&timeout)) {
                                atomic_set(&global_nwo, 0);
                                return -1;
                        }
                        ndelay(SPINUNIT);
                }
        }

        /*
         * Cache the global no_way_out state.
         */
        *no_way_out = atomic_read(&global_nwo);

        return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
        int ret = -1;
        u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

        if (!timeout)
                goto reset;
        if (order < 0)
                goto reset;

        /*
         * Allow others to run.
         */
        atomic_inc(&mce_executing);

        if (order == 1) {
                /* CHECKME: Can this race with a parallel hotplug? */
                int cpus = num_online_cpus();

                /*
                 * Monarch: Wait for everyone to go through their scanning
                 * loops.
                 */
                while (atomic_read(&mce_executing) <= cpus) {
                        if (mce_timed_out(&timeout))
                                goto reset;
                        ndelay(SPINUNIT);
                }

                mce_reign();
                barrier();
                ret = 0;
        } else {
                /*
                 * Subject: Wait for Monarch to finish.
                 */
                while (atomic_read(&mce_executing) != 0) {
                        if (mce_timed_out(&timeout))
                                goto reset;
                        ndelay(SPINUNIT);
                }

                /*
                 * Don't reset anything. That's done by the Monarch.
                 */
                return 0;
        }

        /*
         * Reset all global state.
         */
reset:
        atomic_set(&global_nwo, 0);
        atomic_set(&mce_callin, 0);
        barrier();

        /*
         * Let others run again.
         */
        atomic_set(&mce_executing, 0);
        return ret;
}

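/*
 * Rendezvous in a nutshell: every CPU that takes the broadcast exception
 * increments mce_callin and gets back its arrival order; order 1 is the
 * Monarch. mce_executing then acts as a baton -- Subjects scan their banks
 * one at a time in callin order, the Monarch grades all mces_seen in
 * mce_reign(), and mce_end() resets the counters for the next event.
 */
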
/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
        if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
                return 0;
        if ((m->misc & 0x3f) > PAGE_SHIFT)
                return 0;
        if (((m->misc >> 6) & 7) != MCM_ADDR_PHYS)
                return 0;
        return 1;
}

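/*
 * MCi_MISC field layout assumed above (per the architectural MCA
 * definition): bits 5:0 hold the least significant valid bit of the
 * recoverable address, bits 8:6 the address mode. MCM_ADDR_PHYS means
 * MCi_ADDR is a physical address, the only mode we can map to a page frame.
 */
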
static void mce_clear_state(unsigned long *toclear)
{
        int i;

        for (i = 0; i < banks; i++) {
                if (test_bit(i, toclear))
                        mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        }
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
        struct mce m, *final;
        int i;
        int worst = 0;
        int severity;
        /*
         * Establish sequential order between the CPUs entering the machine
         * check handler.
         */
        int order;
        /*
         * If no_way_out gets set, there is no safe way to recover from this
         * MCE. If tolerant is cranked up, we'll try anyway.
         */
        int no_way_out = 0;
        /*
         * If kill_it gets set, there might be a way to recover from this
         * error.
         */
        int kill_it = 0;
        DECLARE_BITMAP(toclear, MAX_NR_BANKS);
        char *msg = "Unknown";

        atomic_inc(&mce_entry);

        __get_cpu_var(mce_exception_count)++;

        if (notify_die(DIE_NMI, "machine check", regs, error_code,
                       18, SIGKILL) == NOTIFY_STOP)
                goto out;
        if (!banks)
                goto out;

        mce_setup(&m);

        m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
        no_way_out = mce_no_way_out(&m, &msg);

        final = &__get_cpu_var(mces_seen);
        *final = m;

        barrier();

        /*
         * When there is no restart IP we must always kill or panic.
         */
        if (!(m.mcgstatus & MCG_STATUS_RIPV))
                kill_it = 1;

        /*
         * Go through all the banks in exclusion of the other CPUs.
         * This way we don't report duplicated events on shared banks
         * because the first one to see it will clear it.
         */
        order = mce_start(&no_way_out);
        for (i = 0; i < banks; i++) {
                __clear_bit(i, toclear);
                if (!bank[i])
                        continue;

                m.misc = 0;
                m.addr = 0;
                m.bank = i;

                m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4);
                if ((m.status & MCI_STATUS_VAL) == 0)
                        continue;

                /*
                 * Non-uncorrected or non-signalled errors are handled by
                 * machine_check_poll. Leave them alone, unless this panics.
                 */
                if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
                    !no_way_out)
                        continue;

                /*
                 * Set taint even when machine check was not enabled.
                 */
                add_taint(TAINT_MACHINE_CHECK);

                severity = mce_severity(&m, tolerant, NULL);

                /*
                 * When the machine check was for a corrected event don't
                 * touch it, unless we're panicing.
                 */
                if (severity == MCE_KEEP_SEVERITY && !no_way_out)
                        continue;
                __set_bit(i, toclear);
                if (severity == MCE_NO_SEVERITY) {
                        /*
                         * Machine check event was not enabled. Clear, but
                         * ignore.
                         */
                        continue;
                }

                /*
                 * Kill on action required.
                 */
                if (severity == MCE_AR_SEVERITY)
                        kill_it = 1;

                if (m.status & MCI_STATUS_MISCV)
                        m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4);
                if (m.status & MCI_STATUS_ADDRV)
                        m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4);

                /*
                 * Action optional error. Queue address for later processing.
                 * When the ring overflows we just ignore the AO error.
                 * RED-PEN add some logging mechanism when
                 * usable_address or mce_add_ring fails.
                 * RED-PEN don't ignore overflow for tolerant == 0
                 */
                if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
                        mce_ring_add(m.addr >> PAGE_SHIFT);

                mce_get_rip(&m, regs);
                mce_log(&m);

                if (severity > worst) {
                        *final = m;
                        worst = severity;
                }
        }

        if (!no_way_out)
                mce_clear_state(toclear);

        /*
         * Do most of the synchronization with other CPUs.
         * When there's any problem use only local no_way_out state.
         */
        if (mce_end(order) < 0)
                no_way_out = worst >= MCE_PANIC_SEVERITY;

        /*
         * If we have decided that we just CAN'T continue, and the user
         * has not set tolerant to an insane level, give up and die.
         *
         * This is mainly used in the case when the system doesn't
         * support MCE broadcasting or it has been disabled.
         */
        if (no_way_out && tolerant < 3)
                mce_panic("Fatal machine check on current CPU", final, msg);

        /*
         * If the error seems to be unrecoverable, something should be
         * done. Try to kill as little as possible. If we can kill just
         * one task, do that. If the user has set the tolerance very
         * high, don't try to do anything at all.
         */
        if (kill_it && tolerant < 3)
                force_sig(SIGBUS, current);

        /* notify userspace ASAP */
        set_thread_flag(TIF_MCE_NOTIFY);

        if (worst > 0)
                mce_report_event(regs);
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
        atomic_dec(&mce_entry);
        sync_core();
}
EXPORT_SYMBOL_GPL(do_machine_check);

/* dummy to break dependency. actual code is in mm/memory-failure.c */
void __attribute__((weak)) memory_failure(unsigned long pfn, int vector)
{
        printk(KERN_ERR "Action optional memory failure at %lx ignored\n", pfn);
}

/*
 * Called after mce notification in process context. This code
 * is allowed to sleep. Call the high level VM handler to process
 * any corrupted pages.
 * Assume that the work queue code only calls this one at a time
 * per CPU.
 * Note we don't disable preemption, so this code might run on the wrong
 * CPU. In this case the event is picked up by the scheduled work queue.
 * This is merely a fast path to expedite processing in some common
 * cases.
 */
void mce_notify_process(void)
{
        unsigned long pfn;

        mce_notify_irq();
        while (mce_ring_get(&pfn))
                memory_failure(pfn, MCE_VECTOR);
}

static void mce_process_work(struct work_struct *dummy)
{
        mce_notify_process();
}

#ifdef CONFIG_X86_MCE_INTEL
/***
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
{
        struct mce m;

        mce_setup(&m);
        m.bank = MCE_THERMAL_BANK;
        m.status = status;
        mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static int check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mcheck_timer(unsigned long data)
{
        struct timer_list *t = &per_cpu(mce_timer, data);
        int *n;

        WARN_ON(smp_processor_id() != data);

        if (mce_available(&current_cpu_data)) {
                machine_check_poll(MCP_TIMESTAMP,
                                   &__get_cpu_var(mce_poll_banks));
        }

        /*
         * Alert userspace if needed. If we logged an MCE, reduce the
         * polling interval, otherwise increase the polling interval.
         */
        n = &__get_cpu_var(next_interval);
        if (mce_notify_irq())
                *n = max(*n/2, HZ/100);
        else
                *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));

        t->expires = jiffies + *n;
        add_timer(t);
}

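/*
 * Worked example of the adaptive interval: with HZ=1000 and the default
 * check_interval of 300s, a burst of corrected errors halves the period
 * on each expiry down to the HZ/100 floor (10ms); once the machine is
 * quiet again it doubles back up until it saturates at check_interval.
 */
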
static void mce_do_trigger(struct work_struct *work)
{
        call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
        /* Not more than two messages every minute */
        static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

        clear_thread_flag(TIF_MCE_NOTIFY);

        if (test_and_clear_bit(0, &notify_user)) {
                wake_up_interruptible(&mce_wait);

                /*
                 * There is no risk of missing notifications because
                 * work_pending is always cleared before the function is
                 * executed.
                 */
                if (trigger[0] && !work_pending(&mce_trigger_work))
                        schedule_work(&mce_trigger_work);

                if (__ratelimit(&ratelimit))
                        printk(KERN_INFO "Machine check events logged\n");

                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

/*
 * Initialize Machine Checks for a CPU.
 */
static int mce_cap_init(void)
{
        unsigned b;
        u64 cap;

        rdmsrl(MSR_IA32_MCG_CAP, cap);

        b = cap & MCG_BANKCNT_MASK;
        printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);

        if (b > MAX_NR_BANKS) {
                printk(KERN_WARNING
                       "MCE: Using only %u machine check banks out of %u\n",
                       MAX_NR_BANKS, b);
                b = MAX_NR_BANKS;
        }

        /* Don't support asymmetric configurations today */
        WARN_ON(banks != 0 && b != banks);
        banks = b;
        if (!bank) {
                bank = kmalloc(banks * sizeof(u64), GFP_KERNEL);
                if (!bank)
                        return -ENOMEM;
                memset(bank, 0xff, banks * sizeof(u64));
        }

        /* Use accurate RIP reporting if available. */
        if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
                rip_msr = MSR_IA32_MCG_EIP;

        if (cap & MCG_SER_P)
                mce_ser = 1;

        return 0;
}

static void mce_init(void)
{
        mce_banks_t all_banks;
        u64 cap;
        int i;

        /*
         * Log the machine checks left over from the previous reset.
         */
        bitmap_fill(all_banks, MAX_NR_BANKS);
        machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);

        set_in_cr4(X86_CR4_MCE);

        rdmsrl(MSR_IA32_MCG_CAP, cap);
        if (cap & MCG_CTL_P)
                wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

        for (i = 0; i < banks; i++) {
                if (skip_bank_init(i))
                        continue;
                wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
                wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        }
}

/* Add per CPU specific workarounds here */
static void mce_cpu_quirks(struct cpuinfo_x86 *c)
{
        /* This should be disabled by the BIOS, but isn't always */
        if (c->x86_vendor == X86_VENDOR_AMD) {
                if (c->x86 == 15 && banks > 4) {
                        /*
                         * disable GART TBL walk error reporting, which
                         * trips off incorrectly with the IOMMU & 3ware
                         * & Cerberus:
                         */
                        clear_bit(10, (unsigned long *)&bank[4]);
                }
                if (c->x86 <= 17 && mce_bootlog < 0) {
                        /*
                         * Lots of broken BIOS around that don't clear them
                         * by default and leave crap in there. Don't log:
                         */
                        mce_bootlog = 0;
                }
                /*
                 * Various K7s with broken bank 0 around. Always disable
                 * by default.
                 */
                if (c->x86 == 6)
                        bank[0] = 0;
        }

        if (c->x86_vendor == X86_VENDOR_INTEL) {
                /*
                 * SDM documents that on family 6 bank 0 should not be written
                 * because it aliases to another special BIOS controlled
                 * register.
                 * But it's not aliased anymore on model 0x1a+
                 * Don't ignore bank 0 completely because there could be a
                 * valid event later, merely don't write CTL0.
                 */
                if (c->x86 == 6 && c->x86_model < 0x1A)
                        __set_bit(0, &dont_init_banks);

                /*
                 * All newer Intel systems support MCE broadcasting. Enable
                 * synchronization with a one second timeout.
                 */
                if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
                    monarch_timeout < 0)
                        monarch_timeout = USEC_PER_SEC;
        }
        if (monarch_timeout < 0)
                monarch_timeout = 0;
        if (mce_bootlog != 0)
                mce_panic_timeout = 30;
}

static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
{
        if (c->x86 != 5)
                return;
        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                if (mce_p5_enabled())
                        intel_p5_mcheck_init(c);
                break;
        case X86_VENDOR_CENTAUR:
                winchip_mcheck_init(c);
                break;
        }
}

static void mce_cpu_features(struct cpuinfo_x86 *c)
{
        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                mce_intel_feature_init(c);
                break;
        case X86_VENDOR_AMD:
                mce_amd_feature_init(c);
                break;
        default:
                break;
        }
}

static void mce_init_timer(void)
{
        struct timer_list *t = &__get_cpu_var(mce_timer);
        int *n = &__get_cpu_var(next_interval);

        if (mce_ignore_ce)
                return;

        *n = check_interval * HZ;
        if (!*n)
                return;
        setup_timer(t, mcheck_timer, smp_processor_id());
        t->expires = round_jiffies(jiffies + *n);
        add_timer(t);
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
        if (mce_disabled)
                return;

        mce_ancient_init(c);

        if (!mce_available(c))
                return;

        if (mce_cap_init() < 0) {
                mce_disabled = 1;
                return;
        }
        mce_cpu_quirks(c);

        machine_check_vector = do_machine_check;

        mce_init();
        mce_cpu_features(c);
        mce_init_timer();
        INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
}

/*
 * Character device to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_state_lock);
static int              open_count;             /* #times opened */
static int              open_exclu;             /* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
        spin_lock(&mce_state_lock);

        if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
                spin_unlock(&mce_state_lock);

                return -EBUSY;
        }

        if (file->f_flags & O_EXCL)
                open_exclu = 1;
        open_count++;

        spin_unlock(&mce_state_lock);

        return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
        spin_lock(&mce_state_lock);

        open_count--;
        open_exclu = 0;

        spin_unlock(&mce_state_lock);

        return 0;
}

static void collect_tscs(void *data)
{
        unsigned long *cpu_tsc = (unsigned long *)data;

        rdtscll(cpu_tsc[smp_processor_id()]);
}

static DEFINE_MUTEX(mce_read_mutex);

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
                        loff_t *off)
{
        char __user *buf = ubuf;
        unsigned long *cpu_tsc;
        unsigned prev, next;
        int i, err;

        cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
        if (!cpu_tsc)
                return -ENOMEM;

        mutex_lock(&mce_read_mutex);
        next = rcu_dereference(mcelog.next);

        /* Only supports full reads right now */
        if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
                mutex_unlock(&mce_read_mutex);
                kfree(cpu_tsc);

                return -EINVAL;
        }

        err = 0;
        prev = 0;
        do {
                for (i = prev; i < next; i++) {
                        unsigned long start = jiffies;

                        while (!mcelog.entry[i].finished) {
                                if (time_after_eq(jiffies, start + 2)) {
                                        memset(mcelog.entry + i, 0,
                                               sizeof(struct mce));
                                        goto timeout;
                                }
                                cpu_relax();
                        }
                        smp_rmb();
                        err |= copy_to_user(buf, mcelog.entry + i,
                                            sizeof(struct mce));
                        buf += sizeof(struct mce);
timeout:
                        ;
                }

                memset(mcelog.entry + prev, 0,
                       (next - prev) * sizeof(struct mce));
                prev = next;
                next = cmpxchg(&mcelog.next, prev, 0);
        } while (next != prev);

        synchronize_sched();

        /*
         * Collect entries that were still getting written before the
         * synchronize.
         */
        on_each_cpu(collect_tscs, cpu_tsc, 1);

        for (i = next; i < MCE_LOG_LEN; i++) {
                if (mcelog.entry[i].finished &&
                    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
                        err |= copy_to_user(buf, mcelog.entry+i,
                                            sizeof(struct mce));
                        smp_rmb();
                        buf += sizeof(struct mce);
                        memset(&mcelog.entry[i], 0, sizeof(struct mce));
                }
        }
        mutex_unlock(&mce_read_mutex);
        kfree(cpu_tsc);

        return err ? -EFAULT : buf - ubuf;
}

static unsigned int mce_poll(struct file *file, poll_table *wait)
{
        poll_wait(file, &mce_wait, wait);
        if (rcu_dereference(mcelog.next))
                return POLLIN | POLLRDNORM;
        return 0;
}

static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
        int __user *p = (int __user *)arg;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd) {
        case MCE_GET_RECORD_LEN:
                return put_user(sizeof(struct mce), p);
        case MCE_GET_LOG_LEN:
                return put_user(MCE_LOG_LEN, p);
        case MCE_GETCLEAR_FLAGS: {
                unsigned flags;

                do {
                        flags = mcelog.flags;
                } while (cmpxchg(&mcelog.flags, flags, 0) != flags);

                return put_user(flags, p);
        }
        default:
                return -ENOTTY;
        }
}

/* Modified in mce-inject.c, so not static or const */
struct file_operations mce_chrdev_ops = {
        .open                   = mce_open,
        .release                = mce_release,
        .read                   = mce_read,
        .poll                   = mce_poll,
        .unlocked_ioctl         = mce_ioctl,
};
EXPORT_SYMBOL_GPL(mce_chrdev_ops);

static struct miscdevice mce_log_device = {
        MISC_MCELOG_MINOR,
        "mcelog",
        &mce_chrdev_ops,
};

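/*
 * Illustrative userspace consumer (a hedged sketch, not kernel code --
 * the mcelog(8) daemon is the real client). Note that mce_read() above
 * rejects anything smaller than a full-buffer read:
 *
 *      int fd = open("/dev/mcelog", O_RDONLY);
 *      int recordlen, loglen;
 *      ioctl(fd, MCE_GET_RECORD_LEN, &recordlen);
 *      ioctl(fd, MCE_GET_LOG_LEN, &loglen);
 *      char *buf = malloc(recordlen * loglen);
 *      ssize_t n = read(fd, buf, recordlen * loglen);
 *      for (int i = 0; i < n / recordlen; i++)
 *              ... decode the struct mce at buf + i * recordlen ...
 */
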
/*
 * mce=off              Disables machine check
 * mce=no_cmci          Disables CMCI
 * mce=dont_log_ce      Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce        Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *      monarchtimeout is how long to wait for other CPUs on machine
 *      check, or 0 to not wait
 * mce=bootlog          Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog        Don't log MCEs from before booting.
 */
static int __init mcheck_enable(char *str)
{
        if (*str == 0)
                enable_p5_mce();
        if (*str == '=')
                str++;
        if (!strcmp(str, "off"))
                mce_disabled = 1;
        else if (!strcmp(str, "no_cmci"))
                mce_cmci_disabled = 1;
        else if (!strcmp(str, "dont_log_ce"))
                mce_dont_log_ce = 1;
        else if (!strcmp(str, "ignore_ce"))
                mce_ignore_ce = 1;
        else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
                mce_bootlog = (str[0] == 'b');
        else if (isdigit(str[0])) {
                get_option(&str, &tolerant);
                if (*str == ',') {
                        ++str;
                        get_option(&str, &monarch_timeout);
                }
        } else {
                printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
                       str);
                return 0;
        }
        return 1;
}
__setup("mce", mcheck_enable);

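/*
 * Example command lines (a sketch; units follow from the code above --
 * monarch_timeout is multiplied by NSEC_PER_USEC, so it is in microseconds):
 *
 *      mce=off                 disable machine checks entirely
 *      mce=2,500000            tolerant=2, wait 500ms for other CPUs
 *      mce=nobootlog           don't log left-over MCEs from before boot
 */
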
/*
 * Sysfs support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable(void)
{
        int i;

        for (i = 0; i < banks; i++) {
                if (!skip_bank_init(i))
                        wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
        }
        return 0;
}

static int mce_suspend(struct sys_device *dev, pm_message_t state)
{
        return mce_disable();
}

static int mce_shutdown(struct sys_device *dev)
{
        return mce_disable();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static int mce_resume(struct sys_device *dev)
{
        mce_init();
        mce_cpu_features(&current_cpu_data);

        return 0;
}

static void mce_cpu_restart(void *data)
{
        del_timer_sync(&__get_cpu_var(mce_timer));
        if (!mce_available(&current_cpu_data))
                return;
        mce_init();
        mce_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
        on_each_cpu(mce_cpu_restart, NULL, 1);
}

static struct sysdev_class mce_sysclass = {
        .suspend        = mce_suspend,
        .shutdown       = mce_shutdown,
        .resume         = mce_resume,
        .name           = "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, mce_dev);

__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

static struct sysdev_attribute *bank_attrs;

static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
                         char *buf)
{
        u64 b = bank[attr - bank_attrs];

        return sprintf(buf, "%llx\n", b);
}

static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
                        const char *buf, size_t size)
{
        u64 new;

        if (strict_strtoull(buf, 0, &new) < 0)
                return -EINVAL;

        bank[attr - bank_attrs] = new;
        mce_restart();

        return size;
}

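/*
 * Example (assuming the usual sysdev path for the "machinecheck" class;
 * a sketch):
 *
 *      # show the enable mask for bank 4 on CPU 0
 *      cat /sys/devices/system/machinecheck/machinecheck0/bank4
 *      # mask off bit 10 (the K8 GART TBL walk quirk above does the same)
 *      echo 0xfffffffffffffbff > \
 *              /sys/devices/system/machinecheck/machinecheck0/bank4
 *
 * A write updates bank[] and triggers mce_restart() on all CPUs.
 */
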
static ssize_t
show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
{
        strcpy(buf, trigger);
        strcat(buf, "\n");
        return strlen(trigger) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
                           const char *buf, size_t siz)
{
        char *p;
        int len;

        strncpy(trigger, buf, sizeof(trigger));
        trigger[sizeof(trigger)-1] = 0;
        len = strlen(trigger);
        p = strchr(trigger, '\n');

        /* strchr() returns NULL when there is no newline */
        if (p)
                *p = 0;

        return len;
}

static ssize_t store_int_with_restart(struct sys_device *s,
                                      struct sysdev_attribute *attr,
                                      const char *buf, size_t size)
{
        ssize_t ret = sysdev_store_int(s, attr, buf, size);

        mce_restart();
        return ret;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout);

static struct sysdev_ext_attribute attr_check_interval = {
        _SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
                     store_int_with_restart),
        &check_interval
};

static struct sysdev_attribute *mce_attrs[] = {
        &attr_tolerant.attr, &attr_check_interval.attr, &attr_trigger,
        &attr_monarch_timeout.attr,
        NULL
};

static cpumask_var_t mce_dev_initialized;

/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_create_device(unsigned int cpu)
{
        int err;
        int i;

        if (!mce_available(&boot_cpu_data))
                return -EIO;

        memset(&per_cpu(mce_dev, cpu).kobj, 0, sizeof(struct kobject));
        per_cpu(mce_dev, cpu).id        = cpu;
        per_cpu(mce_dev, cpu).cls       = &mce_sysclass;

        err = sysdev_register(&per_cpu(mce_dev, cpu));
        if (err)
                return err;

        for (i = 0; mce_attrs[i]; i++) {
                err = sysdev_create_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
                if (err)
                        goto error;
        }
        for (i = 0; i < banks; i++) {
                err = sysdev_create_file(&per_cpu(mce_dev, cpu),
                                         &bank_attrs[i]);
                if (err)
                        goto error2;
        }
        cpumask_set_cpu(cpu, mce_dev_initialized);

        return 0;
error2:
        while (--i >= 0)
                sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]);
error:
        while (--i >= 0)
                sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);

        sysdev_unregister(&per_cpu(mce_dev, cpu));

        return err;
}

static __cpuinit void mce_remove_device(unsigned int cpu)
{
        int i;

        if (!cpumask_test_cpu(cpu, mce_dev_initialized))
                return;

        for (i = 0; mce_attrs[i]; i++)
                sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);

        for (i = 0; i < banks; i++)
                sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]);

        sysdev_unregister(&per_cpu(mce_dev, cpu));
        cpumask_clear_cpu(cpu, mce_dev_initialized);
}

/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void *h)
{
        unsigned long action = *(unsigned long *)h;
        int i;

        if (!mce_available(&current_cpu_data))
                return;
        if (!(action & CPU_TASKS_FROZEN))
                cmci_clear();
        for (i = 0; i < banks; i++) {
                if (!skip_bank_init(i))
                        wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
        }
}

static void mce_reenable_cpu(void *h)
{
        unsigned long action = *(unsigned long *)h;
        int i;

        if (!mce_available(&current_cpu_data))
                return;

        if (!(action & CPU_TASKS_FROZEN))
                cmci_reenable();
        for (i = 0; i < banks; i++) {
                if (!skip_bank_init(i))
                        wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]);
        }
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct timer_list *t = &per_cpu(mce_timer, cpu);

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                mce_create_device(cpu);
                if (threshold_cpu_callback)
                        threshold_cpu_callback(action, cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                if (threshold_cpu_callback)
                        threshold_cpu_callback(action, cpu);
                mce_remove_device(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                del_timer_sync(t);
                smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                t->expires = round_jiffies(jiffies +
                                           __get_cpu_var(next_interval));
                add_timer_on(t, cpu);
                smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
                break;
        case CPU_POST_DEAD:
                /* intentionally ignoring frozen here */
                cmci_rediscover(cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
        .notifier_call = mce_cpu_callback,
};

static __init int mce_init_banks(void)
{
        int i;

        bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks,
                             GFP_KERNEL);
        if (!bank_attrs)
                return -ENOMEM;

        for (i = 0; i < banks; i++) {
                struct sysdev_attribute *a = &bank_attrs[i];

                a->attr.name    = kasprintf(GFP_KERNEL, "bank%d", i);
                if (!a->attr.name)
                        goto nomem;

                a->attr.mode    = 0644;
                a->show         = show_bank;
                a->store        = set_bank;
        }
        return 0;

nomem:
        while (--i >= 0)
                kfree(bank_attrs[i].attr.name);
        kfree(bank_attrs);
        bank_attrs = NULL;

        return -ENOMEM;
}

static __init int mce_init_device(void)
{
        int err;
        int i = 0;

        if (!mce_available(&boot_cpu_data))
                return -EIO;

        alloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL);

        err = mce_init_banks();
        if (err)
                return err;

        err = sysdev_class_register(&mce_sysclass);
        if (err)
                return err;

        for_each_online_cpu(i) {
                err = mce_create_device(i);
                if (err)
                        return err;
        }

        register_hotcpu_notifier(&mce_cpu_notifier);
        misc_register(&mce_log_device);

        return err;
}

device_initcall(mce_init_device);

#else /* CONFIG_X86_OLD_MCE: */

int nr_mce_banks;
EXPORT_SYMBOL_GPL(nr_mce_banks);        /* non-fatal.o */

/* This has to be run for each processor */
void mcheck_init(struct cpuinfo_x86 *c)
{
        if (mce_disabled == 1)
                return;

        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                amd_mcheck_init(c);
                break;

        case X86_VENDOR_INTEL:
                if (c->x86 == 5)
                        intel_p5_mcheck_init(c);
                if (c->x86 == 6)
                        intel_p6_mcheck_init(c);
                if (c->x86 == 15)
                        intel_p4_mcheck_init(c);
                break;

        case X86_VENDOR_CENTAUR:
                if (c->x86 == 5)
                        winchip_mcheck_init(c);
                break;

        default:
                break;
        }
        printk(KERN_INFO "mce: CPU supports %d MCE banks\n", nr_mce_banks);
}

static int __init mcheck_enable(char *str)
{
        mce_disabled = -1;
        return 1;
}

__setup("mce", mcheck_enable);

#endif /* CONFIG_X86_OLD_MCE */

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
        mce_disabled = 1;
        return 1;
}
__setup("nomce", mcheck_disable);