arch/x86/kernel/cpu/mcheck/mce_64.c

/*
 * Machine check handler.
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/poll.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/idle.h>

#define MISC_MCELOG_MINOR 227
#define NR_BANKS 6

atomic_t mce_entry;

static int mce_dont_init;

/*
 * Tolerant levels:
 * 0: always panic on uncorrected errors, log corrected errors
 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 * 3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
static int banks;
static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
static unsigned long notify_user;
static int rip_msr;
static int mce_bootlog = -1;
static atomic_t mce_events;

static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
	MCE_LOG_SIGNATURE,
	MCE_LOG_LEN,
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;

	atomic_inc(&mce_events);
	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference(mcelog.next);
		for (;;) {
			/* When the buffer fills up discard new entries.
			   Assume that the earlier errors are the more
			   interesting ones. */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip. */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	set_bit(0, &notify_user);
}
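
/*
 * Note on the ordering above: writers claim a slot by winning the
 * cmpxchg() on mcelog.next, copy the record in, and only then set
 * ->finished. The wmb() before ->finished = 1 pairs with the smp_rmb()
 * in readers, so anyone who observes ->finished == 1 also observes the
 * record contents.
 */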

static void print_mce(struct mce *m)
{
	printk(KERN_EMERG "\n"
	       KERN_EMERG "HARDWARE ERROR\n"
	       KERN_EMERG
	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->cpu, m->mcgstatus, m->bank, m->status);
	if (m->ip) {
		printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
		       m->cs, m->ip);
		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		printk("\n");
	}
	printk(KERN_EMERG "TSC %Lx ", m->tsc);
	if (m->addr)
		printk("ADDR %Lx ", m->addr);
	if (m->misc)
		printk("MISC %Lx ", m->misc);
	printk("\n");
	printk(KERN_EMERG "This is not a software problem!\n");
	printk(KERN_EMERG "Run through mcelog --ascii to decode "
	       "and contact your hardware vendor\n");
}

static void mce_panic(char *msg, struct mce *backup, unsigned long start)
{
	int i;

	oops_begin();
	for (i = 0; i < MCE_LOG_LEN; i++) {
		unsigned long tsc = mcelog.entry[i].tsc;

		if (time_before(tsc, start))
			continue;
		print_mce(&mcelog.entry[i]);
		if (backup && mcelog.entry[i].tsc == backup->tsc)
			backup = NULL;
	}
	if (backup)
		print_mce(backup);
	panic(msg);
}

static int mce_available(struct cpuinfo_x86 *c)
{
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
		m->ip = regs->ip;
		m->cs = regs->cs;
	} else {
		m->ip = 0;
		m->cs = 0;
	}
	if (rip_msr) {
		/* Assume the RIP in the MSR is exact. Is this true? */
		m->mcgstatus |= MCG_STATUS_EIPV;
		rdmsrl(rip_msr, m->ip);
		m->cs = 0;
	}
}

/*
 * The actual machine check handler
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mce m, panicm;
	u64 mcestart = 0;
	int i;
	int panicm_found = 0;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;

	atomic_inc(&mce_entry);

	if ((regs
	     && notify_die(DIE_NMI, "machine check", regs, error_code,
			   18, SIGKILL) == NOTIFY_STOP)
	    || !banks)
		goto out2;

	memset(&m, 0, sizeof(struct mce));
	m.cpu = smp_processor_id();
	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
	/* if the restart IP is not valid, we're done for */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		no_way_out = 1;

	rdtscll(mcestart);
	barrier();

	for (i = 0; i < banks; i++) {
		if (!bank[i])
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		if (m.status & MCI_STATUS_EN) {
			/* if PCC was set, there's no way out */
			no_way_out |= !!(m.status & MCI_STATUS_PCC);
			/*
			 * If this error was uncorrectable and there was
			 * an overflow, we're in trouble. If no overflow,
			 * we might get away with just killing a task.
			 */
			if (m.status & MCI_STATUS_UC) {
				if (tolerant < 1 || m.status & MCI_STATUS_OVER)
					no_way_out = 1;
				kill_it = 1;
			}
		}

		if (m.status & MCI_STATUS_MISCV)
			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
		if (m.status & MCI_STATUS_ADDRV)
			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

		mce_get_rip(&m, regs);
		if (error_code >= 0)
			rdtscll(m.tsc);
		if (error_code != -2)
			mce_log(&m);

		/* Did this bank cause the exception? Assume that the bank
		   with uncorrectable errors did it, and that there is only
		   a single one. */
		if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
			panicm = m;
			panicm_found = 1;
		}

		add_taint(TAINT_MACHINE_CHECK);
	}

	/* Never do anything final in the polling timer */
	if (!regs)
		goto out;

	/* If we didn't find an uncorrectable error, pick the last one
	   (shouldn't happen, just being safe). */
	if (!panicm_found)
		panicm = m;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Machine check", &panicm, mcestart);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done. Try to kill as little as possible. If we can kill just
	 * one task, do that. If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3) {
		int user_space = 0;

		/*
		 * If the EIPV bit is set, it means the saved IP is the
		 * instruction which caused the MCE.
		 */
		if (m.mcgstatus & MCG_STATUS_EIPV)
			user_space = panicm.ip && (panicm.cs & 3);

		/*
		 * If we know that the error was in user space, send a
		 * SIGBUS. Otherwise, panic if tolerance is low.
		 *
		 * do_exit() takes an awful lot of locks and has a slight
		 * risk of deadlocking.
		 */
		if (user_space) {
			do_exit(SIGBUS);
		} else if (panic_on_oops || tolerant < 2) {
			mce_panic("Uncorrected machine check",
				  &panicm, mcestart);
		}
	}

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

 out:
	/* the last thing we do is clear state */
	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	wrmsrl(MSR_IA32_MCG_STATUS, 0);
 out2:
	atomic_dec(&mce_entry);
}
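
/*
 * Decision summary for the handler above: no_way_out is set when the
 * restart IP is invalid (RIPV clear), a bank reports PCC, or an
 * uncorrected error overflowed (or tolerant is 0); with tolerant < 3
 * that means panic. kill_it is set for any enabled uncorrected error;
 * if the fault can be pinned to user space (EIPV set, nonzero IP,
 * CS RPL != 0) the task exits with SIGBUS, otherwise we panic when
 * panic_on_oops is set or tolerant < 2.
 */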

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @cpu: The CPU on which the event occurred.
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
{
	struct mce m;

	memset(&m, 0, sizeof(m));
	m.cpu = cpu;
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	rdtscll(m.tsc);
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */
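
/*
 * For illustration only, a sketch of the expected caller (the real one
 * lives in the Intel thermal interrupt path, not in this file): the
 * handler reads the thermal status MSR, applies its own rate limiting,
 * and passes the raw register value through:
 *
 *	u64 msr_val;
 *	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 *	if (event_worth_logging(msr_val))	// placeholder for the
 *						// caller's own filtering
 *		mce_log_therm_throt_event(smp_processor_id(), msr_val);
 */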

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */

static int check_interval = 5 * 60; /* 5 minutes */
static int next_interval; /* in jiffies */
static void mcheck_timer(struct work_struct *work);
static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);

static void mcheck_check_cpu(void *info)
{
	if (mce_available(&current_cpu_data))
		do_machine_check(NULL, 0);
}

static void mcheck_timer(struct work_struct *work)
{
	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	if (mce_notify_user()) {
		next_interval = max(next_interval/2, HZ/100);
	} else {
		next_interval = min(next_interval * 2,
			(int)round_jiffies_relative(check_interval*HZ));
	}

	schedule_delayed_work(&mcheck_work, next_interval);
}

/*
 * This is only called from process context. This is where we do
 * anything we need to alert userspace about new MCEs. This is called
 * directly from the poller and also from entry.S and idle, thanks to
 * TIF_MCE_NOTIFY.
 */
int mce_notify_user(void)
{
	clear_thread_flag(TIF_MCE_NOTIFY);
	if (test_and_clear_bit(0, &notify_user)) {
		static unsigned long last_print;
		unsigned long now = jiffies;

		wake_up_interruptible(&mce_wait);
		if (trigger[0])
			call_usermodehelper(trigger, trigger_argv, NULL,
					    UMH_NO_WAIT);

		if (time_after_eq(now, last_print + (check_interval*HZ))) {
			last_print = now;
			printk(KERN_INFO "Machine check events logged\n");
		}

		return 1;
	}
	return 0;
}
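
/*
 * Example (illustrative): root can point the trigger at a helper that
 * runs whenever new MCEs are logged, via the per-CPU sysfs attribute:
 *
 *	echo /usr/local/sbin/mce-helper > \
 *		/sys/devices/system/machinecheck/machinecheck0/trigger
 *
 * The helper path above is made up; any executable path that fits in
 * the 128-byte trigger buffer works.
 */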

/* see if the idle task needs to notify userspace */
static int
mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
{
	/* IDLE_END should be safe - interrupts are back on */
	if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
		mce_notify_user();

	return NOTIFY_OK;
}

static struct notifier_block mce_idle_notifier = {
	.notifier_call = mce_idle_callback,
};

static __init int periodic_mcheck_init(void)
{
	next_interval = check_interval * HZ;
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				      round_jiffies_relative(next_interval));
	idle_notifier_register(&mce_idle_notifier);
	return 0;
}
__initcall(periodic_mcheck_init);

/*
 * Initialize Machine Checks for a CPU.
 */
static void mce_init(void *dummy)
{
	u64 cap;
	int i;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	banks = cap & 0xff;
	if (banks > NR_BANKS) {
		printk(KERN_INFO "MCE: warning: using only %d banks\n", banks);
		banks = NR_BANKS;
	}
	/* Use accurate RIP reporting if available. */
	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	/* Log the machine checks left over from the previous reset.
	   This also clears all registers. */
	do_machine_check(NULL, mce_bootlog ? -1 : -2);

	set_in_cr4(X86_CR4_MCE);

	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
}
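
/*
 * Layout note: each bank owns four consecutive MSRs starting at
 * MSR_IA32_MC0_CTL (CTL, STATUS, ADDR, MISC), which is why bank i is
 * addressed as MSR_IA32_MC0_xxx + 4*i here and in do_machine_check().
 */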

/* Add per CPU specific workarounds here */
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15)
			/* disable GART TBL walk error reporting, which
			   trips off incorrectly with the IOMMU & 3ware
			   & Cerberus. */
			clear_bit(10, &bank[4]);
		if (c->x86 <= 17 && mce_bootlog < 0)
			/* Lots of broken BIOS around that don't clear them
			   by default and leave crap in there. Don't log. */
			mce_bootlog = 0;
	}
}

static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off.
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
	static cpumask_t mce_cpus = CPU_MASK_NONE;

	mce_cpu_quirks(c);

	if (mce_dont_init ||
	    cpu_test_and_set(smp_processor_id(), mce_cpus) ||
	    !mce_available(c))
		return;

	mce_init(NULL);
	mce_cpu_features(c);
}

/*
 * Character device to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;	/* #times opened */
static int open_exclu;	/* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_state_lock);
		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		open_exclu = 1;
	open_count++;

	spin_unlock(&mce_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	open_count--;
	open_exclu = 0;

	spin_unlock(&mce_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
			loff_t *off)
{
	unsigned long *cpu_tsc;
	static DEFINE_MUTEX(mce_read_mutex);
	unsigned next;
	char __user *buf = ubuf;
	int i, err;

	cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_read_mutex);
	next = rcu_dereference(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		mutex_unlock(&mce_read_mutex);
		kfree(cpu_tsc);
		return -EINVAL;
	}

	err = 0;
	for (i = 0; i < next; i++) {
		unsigned long start = jiffies;

		while (!mcelog.entry[i].finished) {
			if (time_after_eq(jiffies, start + 2)) {
				memset(mcelog.entry + i, 0,
				       sizeof(struct mce));
				goto timeout;
			}
			cpu_relax();
		}
		smp_rmb();
		err |= copy_to_user(buf, mcelog.entry + i,
				    sizeof(struct mce));
		buf += sizeof(struct mce);
 timeout:
		;
	}

	memset(mcelog.entry, 0, next * sizeof(struct mce));
	mcelog.next = 0;

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i,
					    sizeof(struct mce));
			smp_rmb();
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	mutex_unlock(&mce_read_mutex);
	kfree(cpu_tsc);
	return err ? -EFAULT : buf - ubuf;
}
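
/*
 * A sketch of the consumer side (illustrative, not part of this file):
 * since mce_read() rejects partial reads, a reader such as mcelog must
 * supply the full buffer in one call:
 *
 *	struct mce records[MCE_LOG_LEN];
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	ssize_t n = read(fd, records, sizeof(records));
 *	// on success n is a multiple of sizeof(struct mce);
 *	// the read also clears the kernel-side buffer
 */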

static unsigned int mce_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_wait, wait);
	if (rcu_dereference(mcelog.next))
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);
		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}
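
/*
 * Illustrative use of the ioctls from userspace (a sketch): a reader
 * built against a different kernel can ask for the kernel's record and
 * log sizes instead of assuming its own compile-time values:
 *
 *	int reclen, loglen;
 *	ioctl(fd, MCE_GET_RECORD_LEN, &reclen);
 *	ioctl(fd, MCE_GET_LOG_LEN, &loglen);
 *	// size the read buffer as reclen * loglen bytes
 */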

static const struct file_operations mce_chrdev_ops = {
	.open = mce_open,
	.release = mce_release,
	.read = mce_read,
	.poll = mce_poll,
	.unlocked_ioctl = mce_ioctl,
};

static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

static unsigned long old_cr4 __initdata;

void __init stop_mce(void)
{
	old_cr4 = read_cr4();
	clear_in_cr4(X86_CR4_MCE);
}

void __init restart_mce(void)
{
	if (old_cr4 & X86_CR4_MCE)
		set_in_cr4(X86_CR4_MCE);
}

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mce_dont_init = 1;
	return 1;
}

/* mce=off disables machine check. Note you can re-enable it later
   using sysfs.
   mce=TOLERANCELEVEL (number, see above)
   mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
   mce=nobootlog Don't log MCEs from before booting. */
static int __init mcheck_enable(char *str)
{
	if (!strcmp(str, "off"))
		mce_dont_init = 1;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		mce_bootlog = str[0] == 'b';
	else if (isdigit(str[0]))
		get_option(&str, &tolerant);
	else
		printk("mce= argument %s ignored. Please use /sys\n", str);
	return 1;
}

__setup("nomce", mcheck_disable);
__setup("mce=", mcheck_enable);
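
/*
 * Example command lines for the options parsed above:
 *	mce=off		turn off machine checks (sysfs can re-enable)
 *	mce=3		set tolerant level 3 (never panic or SIGBUS)
 *	mce=bootlog	force logging of boot-time leftovers, even on AMD
 */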

/*
 * Sysfs support
 */

/* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
   Only one CPU is active at this time, the others get re-added later using
   CPU hotplug. */
static int mce_resume(struct sys_device *dev)
{
	mce_init(NULL);
	return 0;
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	if (next_interval)
		cancel_delayed_work(&mcheck_work);
	/* Timer race is harmless here */
	on_each_cpu(mce_init, NULL, 1, 1);
	next_interval = check_interval * HZ;
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				      round_jiffies_relative(next_interval));
}

static struct sysdev_class mce_sysclass = {
	.resume = mce_resume,
	.name = "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, device_mce);

/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
	static ssize_t show_ ## name(struct sys_device *s, char *buf) {	\
		return sprintf(buf, "%lx\n", (unsigned long)var);	\
	}								\
	static ssize_t set_ ## name(struct sys_device *s,		\
				    const char *buf, size_t siz) {	\
		char *end;						\
		unsigned long new = simple_strtoul(buf, &end, 0);	\
		if (end == buf)						\
			return -EINVAL;					\
		var = new;						\
		start;							\
		return end-buf;						\
	}								\
	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
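
/*
 * For reference, ACCESSOR(check_interval, check_interval, mce_restart())
 * expands to roughly:
 *
 *	static ssize_t show_check_interval(struct sys_device *s, char *buf)
 *	{
 *		return sprintf(buf, "%lx\n", (unsigned long)check_interval);
 *	}
 *	static ssize_t set_check_interval(struct sys_device *s,
 *					  const char *buf, size_t siz)
 *	{
 *		char *end;
 *		unsigned long new = simple_strtoul(buf, &end, 0);
 *		if (end == buf)
 *			return -EINVAL;
 *		check_interval = new;
 *		mce_restart();
 *		return end - buf;
 *	}
 *	static SYSDEV_ATTR(check_interval, 0644, show_check_interval,
 *			   set_check_interval);
 */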

/* TBD should generate these dynamically based on number of available banks */
ACCESSOR(bank0ctl, bank[0], mce_restart())
ACCESSOR(bank1ctl, bank[1], mce_restart())
ACCESSOR(bank2ctl, bank[2], mce_restart())
ACCESSOR(bank3ctl, bank[3], mce_restart())
ACCESSOR(bank4ctl, bank[4], mce_restart())
ACCESSOR(bank5ctl, bank[5], mce_restart())

static ssize_t show_trigger(struct sys_device *s, char *buf)
{
	strcpy(buf, trigger);
	strcat(buf, "\n");
	return strlen(trigger) + 1;
}

static ssize_t set_trigger(struct sys_device *s, const char *buf, size_t siz)
{
	char *p;
	int len;

	strncpy(trigger, buf, sizeof(trigger));
	trigger[sizeof(trigger)-1] = 0;
	len = strlen(trigger);
	/* strchr() may return NULL when there is no newline */
	p = strchr(trigger, '\n');
	if (p)
		*p = 0;
	return len;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
ACCESSOR(tolerant, tolerant, )
ACCESSOR(check_interval, check_interval, mce_restart())

static struct sysdev_attribute *mce_attributes[] = {
	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
	&attr_tolerant, &attr_check_interval, &attr_trigger,
	NULL
};

static cpumask_t mce_device_initialized = CPU_MASK_NONE;

/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	int err;
	int i;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
	per_cpu(device_mce, cpu).id = cpu;
	per_cpu(device_mce, cpu).cls = &mce_sysclass;

	err = sysdev_register(&per_cpu(device_mce, cpu));
	if (err)
		return err;

	for (i = 0; mce_attributes[i]; i++) {
		err = sysdev_create_file(&per_cpu(device_mce, cpu),
					 mce_attributes[i]);
		if (err)
			goto error;
	}
	cpu_set(cpu, mce_device_initialized);

	return 0;
error:
	while (i--) {
		sysdev_remove_file(&per_cpu(device_mce, cpu),
				   mce_attributes[i]);
	}
	sysdev_unregister(&per_cpu(device_mce, cpu));

	return err;
}

static void mce_remove_device(unsigned int cpu)
{
	int i;

	if (!cpu_isset(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_attributes[i]; i++)
		sysdev_remove_file(&per_cpu(device_mce, cpu),
				   mce_attributes[i]);
	sysdev_unregister(&per_cpu(device_mce, cpu));
	cpu_clear(cpu, mce_device_initialized);
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		mce_remove_device(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};

static __init int mce_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	err = sysdev_class_register(&mce_sysclass);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_create_device(i);
		if (err)
			return err;
	}

	register_hotcpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);
	return err;
}

device_initcall(mce_init_device);